17 #if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP) 
   18     #error ERROR: C++ AMP runtime is not supported for applications where WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP. 
   21 #if !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM) || defined (_M_ARM64) ) 
   22     #error ERROR: C++ AMP runtime is supported only on X64, X86, ARM, and _M_ARM64 architectures. 
   26     #error ERROR: C++ AMP runtime is not supported when compiling /clr. 
   30     #error ERROR: C++ AMP runtime is supported only for C++. 
   36     #pragma comment(lib, "vcampd") 
   38     #pragma comment(lib, "vcamp") 
// Restriction-specifier shorthands used throughout the AMP runtime headers:
//   __GPU      - callable from both amp (GPU) and cpu contexts
//   __GPU_ONLY - callable only in an amp-restricted (GPU) context
//   __CPU_ONLY - callable only in a cpu-restricted context
#define __GPU      restrict(amp,cpu)
#define __GPU_ONLY restrict(amp)
#define __CPU_ONLY restrict(cpu)
   70 #include <unordered_map> 
   72 #include <unordered_set> 
   79 #define _AMPIMP     __declspec(dllimport) 
  135             return InterlockedIncrement(reinterpret_cast<LONG volatile*>(&
_M_rc));
 
  144             size_t refCount = InterlockedDecrement(reinterpret_cast<LONG volatile*>(&
_M_rc));
 
  187             _Other._M_obj_ptr = 
nullptr;
 
  208                 if (oldPtr != 
NULL) {
 
  222                 _Other._M_obj_ptr = 
nullptr;
 
  225                 if (oldPtr != 
nullptr)
 
  258             if (_M_obj_ptr == 
NULL)
 
    // Forward declarations of internal runtime implementation types.
    // They are referenced in this header only by pointer/reference
    // (e.g. _Accelerator_view_impl_ptr, friend declarations); their
    // definitions live elsewhere in the runtime.
    class _Accelerator_view_impl;
    class _CPU_accelerator_view_impl;
    class _D3D_accelerator_view_impl;
    class _Accelerator_impl;
    class _DPC_runtime_factory;
    struct _DPC_shader_blob;
  462             : _M_data_ptr(
NULL), _M_buffer_ptr(NULL),
 
  494                 _M_data_ptr = _Other._M_data_ptr;
 
  495                 _M_curr_cpu_access_mode = _Other._M_curr_cpu_access_mode;
 
  496                 _M_type_access_mode = _Other._M_type_access_mode;
 
  510             if (_M_buffer_ptr != _Buffer_ptr)
 
  512                 if (_M_buffer_ptr != 
NULL) {
 
  518                 if (_M_buffer_ptr != 
NULL) {
 
  524 #if !defined(_CXXAMP) 
  528             _M_buffer_ptr = 
NULL;
 
  544             return ((
const _View_key)(
this));
 
  582             : _M_data_ptr(
NULL), _M_texture_ptr(NULL), _M_most_detailed_mipmap_level(0), _M_view_mipmap_levels(0)
 
  630                 _M_data_ptr = _Other._M_data_ptr;
 
  632                 _M_most_detailed_mipmap_level = _Other._M_most_detailed_mipmap_level;
 
  633                 _M_view_mipmap_levels = _Other._M_view_mipmap_levels;
 
  647             return _M_texture_ptr == _Other._M_texture_ptr 
 
  649                 && _M_most_detailed_mipmap_level == _Other._M_most_detailed_mipmap_level
 
  650                 && _M_view_mipmap_levels == _Other._M_view_mipmap_levels;
 
  655             _ASSERTE(_M_texture_ptr);
 
  671             _M_view_mipmap_levels = _View_mipmap_levels;
 
  676             if (_M_texture_ptr != _Texture_ptr)
 
  678                 if (_M_texture_ptr != 
NULL) {
 
  684                 if (_M_texture_ptr != 
NULL) {
 
  690 #if !defined(_CXXAMP) 
  694             _M_texture_ptr = 
NULL;
 
  708             return !((_M_most_detailed_mipmap_level < _Other->_M_most_detailed_mipmap_level) ? ((_M_most_detailed_mipmap_level + _M_view_mipmap_levels - 1) < _Other->_M_most_detailed_mipmap_level)
 
  734             : _M_data_ptr(
NULL), _M_sampler_ptr(NULL)
 
  762                 _M_data_ptr = _Other._M_data_ptr;
 
  777             return _M_sampler_ptr == _Other._M_sampler_ptr && _M_data_ptr == _Other.
_M_data_ptr;
 
  787             if (_M_sampler_ptr != _Sampler_ptr)
 
  789                 if (_M_sampler_ptr != 
NULL) {
 
  795                 if (_M_sampler_ptr != 
NULL) {
 
  801 #if !defined(_CXXAMP) 
  805             _M_sampler_ptr = 
NULL;
 
 1004         scoped_d3d_access_lock & 
operator=(
const scoped_d3d_access_lock &_Other);
 
 1058         _Init(_Device_path.c_str());
 
 1084         std::vector<accelerator> _AcceleratorVector;
 
 1086         for (
size_t _I = 0; (_I < _NumDevices); ++_I)
 
 1091         return _AcceleratorVector;
 
 1282         : _M_shared_future(_Other._M_shared_future),
 
 1283         _M_task(_Other._M_task)
 
 1291         : _M_shared_future(
std::
move(_Other._M_shared_future)),
 
 1292         _M_task(
std::
move(_Other._M_task))
 
 1306     completion_future& 
operator=(
const completion_future& _Other)
 
 1308         if (
this != &_Other) {
 
 1321         if (
this != &_Other) {
 
 1322             _M_shared_future = 
std::move(_Other._M_shared_future);
 
 1336         _M_shared_future.get();
 
 1349         return _M_shared_future.valid();
 
 1357         _M_shared_future.wait();
 
 1369     template <
class _Rep, 
class _Period>
 
 1372         return _M_shared_future.wait_for(_Rel_time);
 
 1384     template <
class _Clock, 
class _Duration>
 
 1387         return _M_shared_future.wait_until(_Abs_time);
 
 1398     operator std::shared_future<void>() 
const 
 1400         return _M_shared_future;
 
 1407     template <
typename _Functor>
 
 1408     void then(
const _Functor &_Func)
 const 
 1410         this->to_task().then(_Func);
 
 1431                       : _M_shared_future(_Shared_future), _M_task(_Task)
 
 1450     friend class details::_D3D_accelerator_view_impl;
 
 1451     friend class details::_CPU_accelerator_view_impl;
 
 1511     _AMPIMP bool get_is_auto_selection() const;
 
 1512     __declspec(property(
get=get_is_auto_selection)) 
bool is_auto_selection;
 
 1546     _AMPIMP accelerator_view(_Accelerator_view_impl_ptr _Impl, 
bool _Auto_selection = 
false);
 
 1551     bool _M_auto_selection;
 
 1573             std::hash<_Accelerator_view_impl*> _HashFunctor;
 
 1586                                                                      const unsigned int *_Base_extent, 
const unsigned int *_View_offset,
 
 1587                                                                      const unsigned int *_View_extent, 
const bool *_Projection_info = 
NULL);
 
 1598             return _M_linear_offset;
 
 1603             return _M_base_extent;
 
 1608             return _M_view_offset;
 
 1612             return _M_view_extent;
 
 1617             return _M_projection_info;
 
 1622             return _M_projection_info[0];
 
 1628             size_t endLinearOffset = _M_linear_offset + _Get_extent_size(_M_rank, _M_base_extent);
 
 1629             if (endLinearOffset > _Buffer_size) {
 
 1638             return _Get_extent_size(_M_rank, _M_view_extent);
 
 1643             return _Get_linear_offset(_M_view_offset);
 
 1649             _ASSERTE((_Rank >= 1) && (_Extent1 != 
NULL)&& (_Extent2 != 
NULL));
 
 1652             if ((_Extent1[_Rank - 1] * _Elem_size1) != (_Extent2[_Rank - 1] * _Elem_size2)) 
 
 1658             if ((_Rank > 1) && !_Compare_extent(_Rank - 1, _Extent1, _Extent2))
 
 1668         _Compare_extent(
unsigned int _Rank, 
const unsigned int *_Extent1, 
const unsigned int *_Extent2)
 
 1670             for (
size_t _I = 0; _I < _Rank; ++_I) {
 
 1671                 if (_Extent1[_I] != _Extent2[_I]) {
 
 1679         inline bool _Is_view_linear(
unsigned int &_Linear_offset, 
unsigned int &_Linear_size)
 const 
 1683             unsigned int _First_dim_with_non_unit_extent = 0;
 
 1684             while ((_First_dim_with_non_unit_extent < _M_rank) && (_M_view_extent[_First_dim_with_non_unit_extent] == 1)) {
 
 1685                 _First_dim_with_non_unit_extent++;
 
 1688             unsigned int _Effective_rank = (_M_rank - _First_dim_with_non_unit_extent);
 
 1693             if ((_Effective_rank <= 1) ||
 
 1694                 (_Compare_extent(_Effective_rank - 1, &_M_base_extent[_First_dim_with_non_unit_extent + 1], &_M_view_extent[_First_dim_with_non_unit_extent + 1]))) 
 
 1696                 _Linear_offset = _Get_view_linear_offset();
 
 1697                 _Linear_size = _Get_view_size();
 
 1706             if (_Compare_base_shape(_Other))
 
 1711                 for (
size_t _I = 0; _I < _M_rank; ++_I)
 
 1713                     if (!_Intervals_overlap(_M_view_offset[_I], _M_view_offset[_I] + _M_view_extent[_I] - 1,
 
 1725                 size_t firstStart = _Get_view_linear_offset();
 
 1726                 size_t firstEnd = firstStart + _Get_view_size() - 1;
 
 1731                 return _Intervals_overlap(firstStart, firstEnd, secondStart, secondEnd);
 
 1739             if ((_M_rank == 1) && (_Other->
_Get_rank() == 1)) 
 
 1741                 size_t thisStart = _Get_view_linear_offset();
 
 1742                 size_t thisEnd = thisStart + _Get_view_size() - 1;
 
 1747                 return ((otherStart >= thisStart) && (otherEnd <= thisEnd));
 
 1750             if (!_Compare_base_shape(_Other)) {
 
 1758             std::vector<unsigned int> otherEndPointIndex(_M_rank);
 
 1759             for (
size_t _I = 0; _I < _M_rank; ++_I) {
 
 1763             return _Contains(otherEndPointIndex.data());
 
 1768         _View_shape(
unsigned int _Rank, 
unsigned int _Linear_offset,
 
 1769                     const unsigned int *_Base_extent, 
const unsigned int *_View_offset,
 
 1770                     const unsigned int *_View_extent, 
const bool *_Projection_info);
 
 1783                                        size_t _Second_start, 
size_t _Second_end)
 
 1786             if (_First_start > _Second_start) {
 
 1787                 size_t temp = _First_start;
 
 1788                 _First_start = _Second_start;
 
 1789                 _Second_start = temp;
 
 1792                 _First_end = _Second_end;
 
 1797             return (_Second_start <= _First_end);
 
 1802             unsigned int totalExtent = 1;
 
 1803             for (
size_t _I = 0; _I < _Rank; ++_I) {
 
 1804                 totalExtent *= _Extent[_I];
 
 1817             size_t viewSize = 1;
 
 1819             for (
size_t _I = 0; _I < _M_rank; ++_I)
 
 1821                 viewSize *= _M_view_extent[_I];
 
 1822                 if ((_M_view_offset[_I] + _M_view_extent[_I]) > _M_base_extent[_I]) {
 
 1827             if (viewSize == 0) {
 
 1836             return ((_M_rank == _Other->
_M_rank) &&
 
 1838                     _Compare_extent(_M_rank, _M_base_extent, _Other->
_M_base_extent));
 
 1845         inline bool _Contains(
const unsigned int* _Element_index)
 const 
 1847             for (
size_t _I = 0; _I < _M_rank; ++_I)
 
 1849                 if ((_Element_index[_I] < _M_view_offset[_I]) ||
 
 1850                     (_Element_index[_I] >= (_M_view_offset[_I] + _M_view_extent[_I]))) 
 
 1861             unsigned int currMultiplier = 1;
 
 1862             unsigned int linearOffset = _M_linear_offset;
 
 1863             for (
int _I = static_cast<int>(_M_rank - 1); _I >= 0; _I--)
 
 1865                 linearOffset += (currMultiplier * _Element_index[_I]);
 
 1866                 currMultiplier *= _M_base_extent[_I];
 
 1869             return linearOffset;
 
 1887         unsigned int _Rank = _Source_shape->
_Get_rank();
 
 1889         size_t _BaseLSDExtentInBytes = (_Source_shape->
_Get_base_extent())[_Rank - 1] * _Curr_elem_size;
 
 1890         size_t _ViewLSDOffsetInBytes = (_Source_shape->
_Get_view_offset())[_Rank - 1] * _Curr_elem_size;
 
 1891         size_t _ViewLSDExtentInBytes = (_Source_shape->
_Get_view_extent())[_Rank - 1] * _Curr_elem_size;
 
 1893         _ASSERTE((_LinearOffsetInBytes % _New_elem_size) == 0);
 
 1894         _ASSERTE((_BaseLSDExtentInBytes % _New_elem_size) == 0);
 
 1895         _ASSERTE((_ViewLSDOffsetInBytes % _New_elem_size) == 0);
 
 1896         _ASSERTE((_ViewLSDExtentInBytes % _New_elem_size) == 0);
 
 1898         size_t _Temp_val = _LinearOffsetInBytes / _New_elem_size;
 
 1900         unsigned int _New_linear_offset = 
static_cast<unsigned int>(_Temp_val);
 
 1902         std::vector<unsigned int> _New_base_extent(_Rank);
 
 1903         std::vector<unsigned int> _New_view_offset(_Rank);
 
 1904         std::vector<unsigned int> _New_view_extent(_Rank);
 
 1905         for (
unsigned int i = 0; 
i < _Rank - 1; ++
i) {
 
 1912         _Temp_val = _BaseLSDExtentInBytes / _New_elem_size;
 
 1914         _New_base_extent[_Rank - 1] = 
static_cast<unsigned int>(_Temp_val);
 
 1916         _Temp_val = _ViewLSDOffsetInBytes / _New_elem_size;
 
 1918         _New_view_offset[_Rank - 1] = 
static_cast<unsigned int>(_Temp_val);
 
 1920         _Temp_val = _ViewLSDExtentInBytes / _New_elem_size;
 
 1922         _New_view_extent[_Rank - 1] = 
static_cast<unsigned int>(_Temp_val);
 
 1924         return _View_shape::_Create_view_shape(_Rank, _New_linear_offset, _New_base_extent.data(), _New_view_offset.data(), _New_view_extent.data());
 
 1929         switch(cpu_access_type)
 
 1956         return _Cpu_access_type;
 
 1962         friend class _CPU_accelerator_view_impl;
 
 1963         friend class _D3D_accelerator_view_impl;
 
 1964         friend class _D3D_temp_staging_cache;
 
 1972         _AMPIMP static _Ret_ _Buffer * __cdecl _Create_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view, 
size_t _Num_elems,
 
 1977         _AMPIMP static _Ret_ _Buffer * __cdecl _Create_buffer(
_In_ void *_Data_ptr, accelerator_view _Accelerator_view, 
size_t _Num_elems,
 
 1981         _AMPIMP static _Ret_ _Buffer * __cdecl _Create_stage_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view,
 
 1982                                                                     size_t _Num_elems, 
size_t _Elem_size, 
bool _Is_temp = 
false);
 
 1986         _AMPIMP static _Ret_ _Buffer * __cdecl _Get_temp_staging_buffer(accelerator_view _Av, 
size_t _Requested_num_elems, 
size_t _Elem_size);
 
 1998         _AMPIMP _Event _Copy_to_async(
_Out_ _Buffer * _Dest, 
size_t _Num_elems, 
size_t _Src_offset = 0, 
size_t _Dest_offset = 0);
 
 2003         _AMPIMP accelerator_view _Get_accelerator_view() 
const;
 
 2004         _AMPIMP accelerator_view _Get_access_on_accelerator_view() 
const;
 
 2026             return _M_elem_size;
 
 2031             return _M_num_elems;
 
 2036             return _M_accelerator_view;
 
 2041             return _M_access_on_accelerator_view;
 
 2046             return _M_owns_data;
 
 2049         _AMPIMP bool _Exclusively_owns_data();
 
 2053             return _M_is_staging;
 
 2058             return _M_allowed_host_access_mode;
 
 2068             return ((_Get_allowed_host_access_mode() & _Requested_access_mode) == _Requested_access_mode);
 
 2073             return _M_current_host_access_mode;
 
 2084             return _M_is_adopted;
 
 2089             return _M_is_buffer;
 
 2092         _AMPIMP bool _Is_mappable() 
const;
 
 2098         _Buffer(
_In_ _Accelerator_view_impl* _Av, 
_In_ void *_Buffer_data_ptr, 
_In_ void * _Host_ptr,
 
 2100                 size_t _Elem_size, 
bool _Owns_data, 
bool _Is_staging, 
bool _Is_temp, 
bool _Is_adopted);
 
 2112             _ASSERTE((_Host_ptr == 
NULL) || (_Host_access_mode != 
_No_access));
 
 2114             _M_host_ptr = _Host_ptr;
 
 2115             if (_Host_ptr == 
NULL) {
 
 2119                 _M_current_host_access_mode = _Host_access_mode;
 
 2125             _M_data_ptr = _Data_ptr;
 
 2154         friend class _CPU_accelerator_view_impl;
 
 2155         friend class _D3D_accelerator_view_impl;
 
 2156         friend class _D3D_temp_staging_cache;
 
 2163                                                                 size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2164                                                                 unsigned int _Mip_levels,
 
 2166                                                                 unsigned int _Num_channels,
 
 2167                                                                 unsigned int _Bits_per_channel,
 
 2168                                                                 bool _Is_temp = 
false);
 
 2173                                                                 _In_ IUnknown *_Data_ptr, accelerator_view _Accelerator_view,
 
 2174                                                                 unsigned int _View_format);
 
 2177         _AMPIMP static _Ret_ _Texture * __cdecl _Create_stage_texture(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view,
 
 2179                                                                       size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2180                                                                       unsigned int _Mip_levels,
 
 2181                                                                       unsigned int _Format,
 
 2182                                                                       bool _Is_temp = 
false);
 
 2185         _AMPIMP static _Ret_ _Texture * __cdecl _Create_stage_texture(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view,
 
 2187                                                                       size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2188                                                                       unsigned int _Mip_levels,
 
 2190                                                                       unsigned int _Num_channels,
 
 2191                                                                       unsigned int _Bits_per_channel);
 
 2195         _AMPIMP static _Ret_ _Texture * __cdecl _Get_temp_staging_texture(accelerator_view _Accelerator_view,
 
 2197                                                                           size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2198                                                                           unsigned int _Mip_levels,
 
 2199                                                                           unsigned int _Format);
 
 2202         _AMPIMP static _Ret_ _Texture * __cdecl _Clone_texture(
const _Texture *_Src, 
const accelerator_view &_Accelerator_view, 
const accelerator_view &_Associated_av);
 
 2207                                       const size_t *_Src_offset, 
const size_t *_Dst_offset,
 
 2208                                       unsigned int _Src_mipmap_level, 
unsigned int _Dst_mipmap_level);
 
 2212             return (_M_width >> _Mip_offset) ? (_M_width >> _Mip_offset) : 1U;
 
 2217             return (_M_height >> _Mip_offset) ? (_M_height >> _Mip_offset) : 1U;
 
 2222             return (_M_depth >> _Mip_offset) ? (_M_depth >> _Mip_offset) : 1U;
 
 2232             return _M_texture_format;
 
 2237             return _M_view_format;
 
 2242             return _M_num_channels;
 
 2248             return _Is_adopted() ? 0 : _M_bits_per_channel;
 
 2253             return _M_bits_per_channel * _M_num_channels;
 
 2256         unsigned int _Get_data_length(
unsigned int _Most_detailed_mipmap_level, 
unsigned int _View_mipmap_levels, 
const size_t *_Extents = 
nullptr) const  
 
 2258             _ASSERTE(_View_mipmap_levels);
 
 2260             unsigned long long _Bits_per_byte = 8ULL;
 
 2261             unsigned long long _Total_bytes = 0ULL;
 
 2263             unsigned int _Mip_level = _Most_detailed_mipmap_level;
 
 2266             for (
unsigned int _Mip_offset=0; _Mip_offset < _View_mipmap_levels; ++_Mip_offset)
 
 2268                 unsigned long long _Width = 1ULL;
 
 2269                 unsigned long long _Height = 1ULL;
 
 2270                 unsigned long long _Depth = 1ULL;
 
 2277                         _Depth = (_Extents[2] >> _Mip_level) ? (_Extents[2] >> _Mip_level) : 1U;
 
 2280                         _Height = (_Extents[1] >> _Mip_level) ? (_Extents[1] >> _Mip_level) : 1U;
 
 2283                         _Width = (_Extents[0] >> _Mip_level) ? (_Extents[0] >> _Mip_level) : 1U;
 
 2291                     _Width = _Get_width(_Mip_level);
 
 2292                     _Height = _Get_height(_Mip_level);
 
 2293                     _Depth = _Get_depth(_Mip_level);
 
 2298                 _Total_bytes += ((_Width * _Height * _Depth * 
static_cast<unsigned long long>(_Get_bits_per_element())) + _Bits_per_byte - 1) / _Bits_per_byte;
 
 2303             return static_cast<unsigned int>(_Total_bytes);
 
 2308             return _M_mip_levels;
 
 2313             return _M_row_pitch;
 
 2318             _M_row_pitch = 
_Val;
 
 2323             return _M_depth_pitch;
 
 2328             _M_depth_pitch = 
_Val;
 
 2335         _Texture(
_In_ _Accelerator_view_impl* _Av, 
_In_ void *_Texture_data_ptr, 
_In_ void * _Host_ptr,
 
 2338                  size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2339                  unsigned int _Mip_levels,
 
 2340                  unsigned int _Texture_format,
 
 2341                  unsigned int _View_format,
 
 2342                  unsigned int _Num_channels,
 
 2343                  unsigned int _Bits_per_channel,
 
 2344                  bool _Owns_data, 
bool _Is_staging, 
bool _Is_temp, 
bool _Is_adopted);
 
 2374             unsigned int _Filter_mode,
 
 2375             unsigned int _Address_mode,
 
 2394             return _M_is_adopted;
 
 2399             return _M_filter_mode;
 
 2404             return _M_address_mode;
 
 2409             return &_M_border_color[0];
 
 2415         _Sampler(
unsigned int _Filter_mode, 
unsigned int _Address_mode, 
float _Border_r, 
float _Border_g, 
float _Border_b, 
float _Border_a);
 
 2431         float _M_border_color[4];
 
 2437                                       size_t _Num_elems, 
size_t _Preferred_copy_chunk_num_elems = 0);
 
 2440                                             _Out_ _Texture *_Dst_tex, 
const size_t *_Dst_offset, 
unsigned int _Dst_mipmap_level,
 
 2441                                             const size_t *_Copy_extent, 
const size_t *_Preferred_copy_chunk_extent = 
NULL);
 
 2445         bool _Truncated_copy = 
false;
 
 2446         size_t _Allocation_extent[3] = { _Copy_chunk_extent[0], _Copy_chunk_extent[1], _Copy_chunk_extent[2] };
 
 2448         unsigned int _Most_sig_idx = _Tex->_Get_rank() - 1;
 
 2450         if (_Allocation_extent[_Most_sig_idx] > _Remaining_copy_extent[_Most_sig_idx]) {
 
 2451             _Allocation_extent[_Most_sig_idx] = _Remaining_copy_extent[_Most_sig_idx];
 
 2454         _Texture_ptr _Stage = _Texture::_Get_temp_staging_texture(_Tex->_Get_accelerator_view(), _Tex->_Get_rank(),
 
 2455             _Allocation_extent[0], _Allocation_extent[1], _Allocation_extent[2],
 
 2456             1, _Tex->_Get_texture_format());
 
 2459         size_t _Staging_tex_extent[3] = {_Stage->_Get_width(), _Stage->_Get_height(), _Stage->_Get_depth()};
 
 2460         if (_Curr_copy_extent[_Most_sig_idx] > _Staging_tex_extent[_Most_sig_idx]) {
 
 2461             _Curr_copy_extent[_Most_sig_idx] = _Staging_tex_extent[_Most_sig_idx];
 
 2466         if (_Curr_copy_extent[_Most_sig_idx] < _Remaining_copy_extent[_Most_sig_idx]) 
 
 2468             _Remaining_copy_extent[_Most_sig_idx] -= _Curr_copy_extent[_Most_sig_idx];
 
 2469             _Truncated_copy = 
true;
 
 2472         for (
unsigned int _I = 0; _I < _Most_sig_idx; _I++)
 
 2474             _ASSERTE(_Curr_copy_extent[_I] == _Remaining_copy_extent[_I]);
 
 2477         *_Staging_texture = _Stage;
 
 2478         return _Truncated_copy;
 
 2481     #pragma warning ( push ) 
 2482     #pragma warning ( disable : 6101 ) 
 2487     template <
typename _Input_iterator, 
typename _Value_type>
 
 2489         (
int _Rank, _Input_iterator _Src, 
_Out_ _Value_type *_Dst,
 
 2490          size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2491          size_t _Dst_row_pitch_in_bytes, 
size_t _Dst_depth_pitch_in_bytes,
 
 2492          size_t _Src_row_pitch, 
size_t _Src_depth_pitch)
 
 2498                 _Input_iterator _End = _Src;
 
 2505                 unsigned char *_Dst_ptr = 
reinterpret_cast<unsigned char *
>(_Dst);
 
 2506                 _Input_iterator _Src_start = _Src;
 
 2507                 for (
size_t _I = 0; _I < _Height; _I++)
 
 2509                     _Input_iterator _Src_end = _Src_start;
 
 2514                     _Dst_ptr += _Dst_row_pitch_in_bytes;
 
 2521                 unsigned char *_Dst_ptr_slice_start = 
reinterpret_cast<unsigned char *
>(_Dst);
 
 2522                 _Input_iterator _Src_depth_slice_start = _Src;
 
 2523                 for (
size_t _I = 0; _I < _Depth; _I++)
 
 2525                     _Input_iterator _Src_start = _Src_depth_slice_start;
 
 2526                     unsigned char *_Dst_ptr = _Dst_ptr_slice_start;
 
 2528                     for (
size_t _J = 0; _J < _Height; _J++)
 
 2530                         _Input_iterator _Src_end = _Src_start;
 
 2535                         _Dst_ptr += _Dst_row_pitch_in_bytes;
 
 2539                     _Dst_ptr_slice_start += _Dst_depth_pitch_in_bytes;
 
 2540                     std::advance(_Src_depth_slice_start, _Src_depth_pitch);
 
 2549     #pragma warning ( pop ) // disable : 6101 
 2551     template <
typename _Output_iterator, 
typename _Value_type>
 
 2553         (
int _Rank, 
const _Value_type * _Src, _Output_iterator _Dst,
 
 2554          size_t _Width, 
size_t _Height, 
size_t _Depth,
 
 2555          size_t _Src_row_pitch_in_bytes, 
size_t _Src_depth_pitch_in_bytes,
 
 2556          size_t _Dst_row_pitch, 
size_t _Dst_depth_pitch)
 
 2562                 const _Value_type * _End = _Src + _Width;
 
 2568                 const unsigned char *_Src_ptr = 
reinterpret_cast<const unsigned char *
>(_Src);
 
 2569                 _Output_iterator _Dst_iter = _Dst;
 
 2570                 for (
size_t _I = 0; _I < _Height; _I++)
 
 2572                     const _Value_type * _Src_end = 
reinterpret_cast<const _Value_type*
>(_Src_ptr) + _Width;
 
 2576                     _Src_ptr += _Src_row_pitch_in_bytes;
 
 2582                 const unsigned char *_Src_ptr_slice_start = 
reinterpret_cast<const unsigned char *
>(_Src);
 
 2583                 _Output_iterator _Dst_depth_slice_start = _Dst;
 
 2584                 for (
size_t _I = 0; _I < _Depth; _I++)
 
 2586                     _Output_iterator _Dst_iter = _Dst_depth_slice_start;
 
 2587                     const unsigned char *_Src_ptr = _Src_ptr_slice_start;
 
 2589                     for (
size_t _J = 0; _J < _Height; _J++)
 
 2591                         const _Value_type * _Src_end = 
reinterpret_cast<const _Value_type *
>(_Src_ptr) + _Width;
 
 2596                         _Src_ptr += _Src_row_pitch_in_bytes;
 
 2599                     _Src_ptr_slice_start += _Src_depth_pitch_in_bytes;
 
 2600                     std::advance(_Dst_depth_slice_start, _Dst_depth_pitch);
 
 2616         return (preferredChunkSize / _Elem_size);
 
 2620                                                  size_t _Depth, 
size_t _Bits_per_element, 
_Out_writes_(3) 
size_t *_Preferred_copy_chunk_extent)
 
 2622         _ASSERTE(_Preferred_copy_chunk_extent != 
nullptr);
 
 2624         size_t requestedByteSize = 
static_cast<size_t>((
static_cast<unsigned long long>(_Width) *
 
 2625                                                         static_cast<unsigned long long>(_Height) *
 
 2626                                                         static_cast<unsigned long long>(_Depth) *
 
 2627                                                         static_cast<unsigned long long>(_Bits_per_element)) >> 3);
 
 2632         size_t preferredCopyChunkNumElems = 
static_cast<size_t>((
static_cast<unsigned long long>(preferredChunkSize) * 8U) / _Bits_per_element);
 
 2639             _Width = preferredCopyChunkNumElems;
 
 2642             _Height = (preferredCopyChunkNumElems + _Width - 1) / _Width;
 
 2645             _Depth = (preferredCopyChunkNumElems + (_Height * _Width) - 1) / (_Height * _Width);
 
 2651         _Preferred_copy_chunk_extent[0] = _Width;
 
 2652         _Preferred_copy_chunk_extent[1] = _Height;
 
 2653         _Preferred_copy_chunk_extent[2] = _Depth;
 
 2657     template <
typename _T>
 
 2660         static_assert(std::is_unsigned<_T>::value, 
"This GCD function only supports unsigned integral types");
 
 2662         _ASSERTE((_M > 0) && (_N > 0));
 
 2680     template <
typename _T>
 
 2683         static_assert(std::is_unsigned<_T>::value, 
"This LCM function only supports unsigned integral types");
 
 2685         _ASSERTE((_M > 0) && (_N > 0));
 
 2688         return ((_M / _Gcd) * _N);
 
 2691     template <
typename InputIterator, 
typename _Value_type>
 
 2692     inline _Event _Copy_impl(InputIterator _SrcFirst, InputIterator _SrcLast, 
size_t _NumElemsToCopy,
 
 2693                              _Out_ _Buffer * _Dst, 
size_t _Dest_offset, 
size_t _Preferred_copy_chunk_num_elems = 0)
 
 2695         if (_NumElemsToCopy == 0) {
 
 2703 #pragma warning ( push ) 
 2704 #pragma warning ( disable : 6001 ) // Using uninitialized memory '*_Dst' 
 2705         if (((_NumElemsToCopy * 
sizeof(_Value_type)) + (_Dest_offset * _Dst->_Get_elem_size())) > (_Dst->_Get_num_elems() * _Dst->_Get_elem_size()))
 
 2707             throw runtime_exception(
"Invalid _Src argument(s). _Src size exceeds total size of the _Dest.", E_INVALIDARG);
 
 2709 #pragma warning ( pop ) 
 2711         _ASSERTE(_NumElemsToCopy == (
size_t)(
std::distance(_SrcFirst, _SrcLast)));
 
 2721             _Buffer_ptr _PDestBuf = _Dst;
 
 2724                 _Value_type *_DestPtr = 
reinterpret_cast<_Value_type*
>(
reinterpret_cast<char*
>(_PDestBuf->_Get_host_ptr()) + (_Dest_offset * _PDestBuf->_Get_elem_size()));
 
 2737             _Buffer_ptr _PDestBuf = _Dst;
 
 2738             size_t _NumElemsToCopyRemaining = _NumElemsToCopy;
 
 2739             size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
 
 2740             if (_PreferredNumElemsToCopyPerChunk == 0) {
 
 2745             size_t _CurrDstOffset = _Dest_offset;
 
 2746             InputIterator _CurrStartIter = _SrcFirst;
 
 2750             size_t _AdjustmentRatio = _Lcm / 
sizeof(_Value_type);
 
 2754                 size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
 
 2755                 if (_NumElemsToCopyRemaining < _AllocationNumElems) {
 
 2756                     _AllocationNumElems = _NumElemsToCopyRemaining;
 
 2759                 _Buffer_ptr _PDestStagingBuf = _Buffer::_Get_temp_staging_buffer(_Dst->_Get_accelerator_view(),
 
 2760                                                                                  _AllocationNumElems, 
sizeof(_Value_type));
 
 2762                 _ASSERTE(_PDestStagingBuf != 
NULL);
 
 2763                 _ASSERTE(_PDestStagingBuf->_Get_elem_size() == 
sizeof(_Value_type));
 
 2765                 InputIterator _CurrEndIter = _CurrStartIter;
 
 2766                 size_t _CurrNumElemsToCopy = _AllocationNumElems;
 
 2767                 if (_CurrNumElemsToCopy > _PDestStagingBuf->_Get_num_elems()) {
 
 2768                     _CurrNumElemsToCopy = _PDestStagingBuf->_Get_num_elems();
 
 2771                 if (_NumElemsToCopyRemaining <= _CurrNumElemsToCopy) {
 
 2772                     _CurrNumElemsToCopy = _NumElemsToCopyRemaining;
 
 2773                     _CurrEndIter = _SrcLast;
 
 2779                     _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
 
 2783                 _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);
 
 2792                 _Ev = _Ev.
_Add_event(_PDestStagingBuf->_Copy_to_async(_PDestBuf, _CurrNumElemsToCopy, 0, _CurrDstOffset));
 
 2795                 _NumElemsToCopyRemaining -= _CurrNumElemsToCopy;
 
 2796                 _CurrDstOffset += (_CurrNumElemsToCopy * 
sizeof(_Value_type)) / _Dst->_Get_elem_size();
 
 2797                 _CurrStartIter = _CurrEndIter;
 
 2799             } 
while (_NumElemsToCopyRemaining != 0);
 
 2808     template<
typename _InputIterator, 
typename _Distance>
 
 2809     typename std::enable_if<std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_InputIterator>::iterator_category>::value>::type
 
 2815     template<
typename _OutputIterator, 
typename _Distance>
 
 2816     typename std::enable_if<!std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_OutputIterator>::iterator_category>::value>::type
 
 2819         for (
size_t i = 0; 
i < _N; ++
i)
 
 2825     template <
typename OutputIterator, 
typename _Value_type>
 
 2827                              OutputIterator _DestIter, 
size_t _Preferred_copy_chunk_num_elems = 0)
 
 2829         if ((_Src == 
NULL) || ((_Src_offset + _Num_elems) > _Src->_Get_num_elems())) {
 
 2833         if (_Num_elems == 0) {
 
 2837         size_t _NumElemsToCopy = (_Num_elems * _Src->_Get_elem_size()) / 
sizeof(_Value_type);
 
 2847             _Buffer_ptr _PSrcBuf = _Src;
 
 2851                 const _Value_type *_PFirst = 
reinterpret_cast<const _Value_type*
>(
reinterpret_cast<char*
>(_PSrcBuf->_Get_host_ptr()) + (_Src_offset * _PSrcBuf->_Get_elem_size()));
 
 2852                 std::copy(_PFirst, _PFirst + _NumElemsToCopy, _DestIter);
 
 2867             _Buffer_ptr _PSrcBuf = _Src;
 
 2868             size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
 
 2869             if (_PreferredNumElemsToCopyPerChunk == 0) {
 
 2875             size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
 
 2876             if (_NumElemsToCopy < _AllocationNumElems) {
 
 2877                 _AllocationNumElems = _NumElemsToCopy;
 
 2880             _Buffer_ptr _PSrcStagingBuf = _Buffer::_Get_temp_staging_buffer(_Src->_Get_accelerator_view(),
 
 2881                                                                             _AllocationNumElems, 
sizeof(_Value_type));
 
 2883             _ASSERTE(_PSrcStagingBuf != 
NULL);
 
 2884             _ASSERTE(_PSrcStagingBuf->_Get_elem_size() == 
sizeof(_Value_type));
 
 2889             size_t _AdjustmentRatio = _Lcm / 
sizeof(_Value_type);
 
 2891             size_t _CurrNumElemsToCopy = _AllocationNumElems;
 
 2892             if (_CurrNumElemsToCopy > _PSrcStagingBuf->_Get_num_elems()) {
 
 2893                 _CurrNumElemsToCopy = _PSrcStagingBuf->_Get_num_elems();
 
 2895             if (_NumElemsToCopy <= _CurrNumElemsToCopy)
 
 2897                 _CurrNumElemsToCopy = _NumElemsToCopy;
 
 2903                 _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
 
 2906             _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);
 
 2908             size_t _NumElemsToCopyRemaining = _NumElemsToCopy - _CurrNumElemsToCopy;
 
 2910             _Ev = _PSrcBuf->_Copy_to_async(_PSrcStagingBuf, (_CurrNumElemsToCopy * 
sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(), _Src_offset, 0);
 
 2912             if (_NumElemsToCopyRemaining != 0)
 
 2915                                                                      _CurrNumElemsToCopy, _NumElemsToCopyRemaining,
 
 2916                                                                      _Src_offset, _PreferredNumElemsToCopyPerChunk]() 
mutable -> 
_Event  
 2920                     size_t _CurrSrcOffset = _Src_offset + ((_CurrNumElemsToCopy * 
sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size());
 
 2921                     OutputIterator _CurrDestIter = _DestIter;
 
 2922                     _Advance_output_iterator<decltype(_CurrDestIter), size_t>(_CurrDestIter, _CurrNumElemsToCopy);
 
 2923                     _Event _Ret_ev = _Copy_impl<OutputIterator, _Value_type>(_PSrcBuf.
_Get_ptr(), _CurrSrcOffset,
 
 2924                                                                              (_NumElemsToCopyRemaining * 
sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(),
 
 2925                                                                              _CurrDestIter, _PreferredNumElemsToCopyPerChunk);
 
 2928                     _Value_type *_PFirst = 
reinterpret_cast<_Value_type*
>(_PSrcStagingBuf->_Get_host_ptr());
 
 2929                     std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
 
 2937                     _Value_type *_PFirst = 
reinterpret_cast<_Value_type*
>(_PSrcStagingBuf->_Get_host_ptr());
 
 2938                     std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
 
 2953             const unsigned int _Rank,
 
 2954             const unsigned int _Src_linear_offset,
 
 2955             const unsigned int * _Src_extents, 
 
 2956             const unsigned int * _Src_copy_offset,
 
 2957             const unsigned int _Dst_linear_offset,
 
 2958             const unsigned int * _Dst_extents, 
 
 2959             const unsigned int * _Dst_copy_offset, 
 
 2960             const unsigned int * _Copy_extents)
 
 2962             this->_Rank = _Rank;
 
 2964             this->_Src_linear_offset = _Src_linear_offset;
 
 2965             this->_Src_extents.assign( _Src_extents, _Src_extents + _Rank);
 
 2966             this->_Src_copy_offset.assign( _Src_copy_offset, _Src_copy_offset + _Rank);
 
 2968             this->_Dst_linear_offset = _Dst_linear_offset;
 
 2969             this->_Dst_extents.assign( _Dst_extents, _Dst_extents + _Rank);
 
 2970             this->_Dst_copy_offset.assign( _Dst_copy_offset, _Dst_copy_offset + _Rank);
 
 2972             this->_Copy_extents.assign( _Copy_extents, _Copy_extents + _Rank);
 
 2995                                                   unsigned int _Native_copy_rank,
 
 2996                                                   std::function<HRESULT(
const _Array_copy_desc &_Reduced)> _Native_copy_func);
 
 3002     template<
typename _InputInterator, 
typename _OutputIterator>
 
 3005         std::copy(_SrcFirst, _SrcLast, _DstFirst);
 
 3010     template <
typename InputIterator, 
typename _Value_type>
 
 3013         _ASSERTE(_Dst != 
NULL);
 
 3014         _ASSERTE(_Src_shape != 
NULL);
 
 3015         _ASSERTE(_Dst_shape != 
NULL);
 
 3017         if (_Src_shape->_Is_projection()) {
 
 3018             _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
 
 3021         if (_Dst_shape->_Is_projection()) {
 
 3022             _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
 
 3025         _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
 
 3027         _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
 
 3028                                                              sizeof(_Value_type), _Dst_shape->_Get_view_extent(), _Dst->_Get_elem_size()));
 
 3034             _Buffer_ptr _PDestBuf = _Dst;
 
 3045                                                                          _Src_shape->_Get_view_size(), 
sizeof(_Value_type), 
true );
 
 3048             _Value_type *_Dst_ptr = 
reinterpret_cast<_Value_type*
>(_PTempStagingBuf->_Get_host_ptr());
 
 3052             _Buffer_ptr _PDestBuf = _Dst;
 
 3054                 return _Copy_impl(_PTempStagingBuf, _Src_shape, _PDestBuf, _Dst_shape);
 
 3059     template <
typename OutputIterator, 
typename _Value_type>
 
 3062         _ASSERTE(_Src != 
NULL);
 
 3063         _ASSERTE(_Src_shape != 
NULL);
 
 3064         _ASSERTE(_Dst_shape != 
NULL);
 
 3066         if (_Src_shape->_Is_projection()) {
 
 3067             _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
 
 3070         if (_Dst_shape->_Is_projection()) {
 
 3071             _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
 
 3074         _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
 
 3076         _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
 
 3077                                                              _Src->_Get_elem_size(), _Dst_shape->_Get_view_extent(), 
sizeof(_Value_type)));
 
 3084             _Buffer_ptr _PSrcBuf = _Src;
 
 3086                 return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PSrcBuf->_Get_host_ptr()),
 
 3088                                        _DestIter, _Dst_shape);
 
 3097                                                                          _Dst_shape->_Get_view_size(), 
sizeof(_Value_type), 
true);
 
 3099             _Event _Ev = _Src->_Copy_to_async(_PTempStagingBuf, _Src_shape, _Dst_shape);
 
 3101                 return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PTempStagingBuf->_Get_host_ptr()),
 
 3102                                        _Dst_shape, _DestIter, _Dst_shape);
 
 3108     template<
typename _InputInterator, 
typename _OutputIterator>
 
 3110                                   _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
 
 3112         if (_Src_shape->_Is_projection()) {
 
 3113             _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
 
 3116         if (_Dst_shape->_Is_projection()) {
 
 3117             _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
 
 3120         _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
 
 3121         _ASSERTE(_View_shape::_Compare_extent(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(), _Dst_shape->_Get_view_extent()));
 
 3124         unsigned int _Src_linear_offset, _Src_linear_size, _Dst_linear_offset, _Dst_linear_size;
 
 3125         if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size) &&
 
 3126             _Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
 
 3128             _ASSERTE(_Src_linear_size == _Dst_linear_size);
 
 3132             auto _SrcLast = _SrcIter;
 
 3139         std::vector<unsigned int> _Src_extent(_Src_shape->_Get_rank());
 
 3140         std::vector<unsigned int> _Src_offset(_Src_shape->_Get_rank());
 
 3141         std::vector<unsigned int> _Dst_extent(_Dst_shape->_Get_rank());
 
 3142         std::vector<unsigned int> _Dst_offset(_Dst_shape->_Get_rank());
 
 3143         std::vector<unsigned int> _Copy_extent(_Src_shape->_Get_rank());
 
 3145         for (
size_t i = 0; 
i < _Src_shape->_Get_rank(); ++
i) {
 
 3146             _Src_extent[
i] = _Src_shape->_Get_base_extent()[
i];
 
 3147             _Src_offset[
i] = _Src_shape->_Get_view_offset()[
i];
 
 3148             _Dst_extent[
i] = _Dst_shape->_Get_base_extent()[
i];
 
 3149             _Dst_offset[
i] = _Dst_shape->_Get_view_offset()[
i];
 
 3150             _Copy_extent[
i] = _Src_shape->_Get_view_extent()[
i];
 
 3154             _Src_shape->_Get_rank(),
 
 3155             _Src_shape->_Get_linear_offset(),
 
 3158             _Dst_shape->_Get_linear_offset(),
 
 3161             _Copy_extent.data());
 
 3167             auto _SrcFirst = _SrcIter;
 
 3168             auto _DstFirst = _DstIter;
 
 3172             auto _SrcLast = _SrcFirst;
 
 3175             std::copy(_SrcFirst, _SrcLast, _DstFirst);
 
 3192         friend _AMPIMP accelerator_view __cdecl _Select_copy_src_accelerator_view(
_In_ _View_key _Src_view_key, 
const accelerator_view &_Dest_accelerator_view);
 
 3225         _AMPIMP _Buffer_ptr _Get_master_buffer() 
const;
 
 3227         _AMPIMP accelerator_view _Get_master_accelerator_view() 
const;
 
 3233             return _M_master_av;
 
 3238             return _M_master_buffer_elem_size;
 
 3243             return _M_master_buffer_num_elems;
 
 3248             return _M_has_data_source;
 
 3277         _Event _Commit_view_async(
_In_ _View_info *_Info, 
_Inout_ ULONGLONG *_Sync_size = 
nullptr);
 
 3289         void _Unset_discard_flag(
_Inout_ _View_info *_Info);
 
 3293         bool _Should_discard(
const _View_info *_Info, 
_In_opt_ const _View_key _Source_view_key = 
nullptr) 
const;
 
 3298         bool _Has_exclusive_data(
const _View_info *_Info) 
const;
 
 3303         bool _Requires_update_on_target_accelerator_view(
const _View_info *_Info,
 
 3305                                                          _In_ _Accelerator_view_impl* _Target_acclerator_view) 
const;
 
 3310         static void _Flag_redundant_commits(std::vector<std::pair<_View_info*, bool>> &_Commit_list);
 
 3322         _Accelerator_view_unordered_set _Get_caching_info_impl(
_In_ _View_key _Key, 
_Out_opt_ bool *_Can_access_anywhere);
 
 3324         _Ret_ _Accelerator_view_impl* _Determine_alternate_target_accelerator_view(
_In_ _View_key _Key,
 
 3325                                                                              _In_ _Accelerator_view_impl* _Original_av,
 
 3330             auto const iterator = _M_view_map.find(key);
 
 3331             return _M_view_map.end() == iterator ? 
nullptr : iterator->second;
 
 3383         _AMPIMP static _Ret_ void * __cdecl _Get_D3D_sampler_data_ptr(
_In_ IUnknown *_D3D_sampler);
 
 3384         _AMPIMP static void __cdecl _Release_D3D_sampler_data_ptr(
_In_ void *_Sampler_data_ptr);
 
 3416 #define HELPERAPI __cdecl 
 3461             _ASSERTE(_Is_buffer());
 
 3467             _ASSERTE(_Is_texture());
 
 3473             _ASSERTE(_Is_sampler());
 
 3482                 return reinterpret_cast<void *
>(_Tmp);
 
 3484             else if (_Is_texture())
 
 3486                 _Texture * _Tmp = _Get_texture_desc()->_Get_texture_ptr();
 
 3487                 return reinterpret_cast<void *
>(_Tmp);
 
 3491                 _ASSERTE(_Is_sampler());
 
 3492                 _Sampler * _Tmp = _Get_sampler_desc()->_Get_sampler_ptr();
 
 3493                 return reinterpret_cast<void *
>(_Tmp);
 
 3552             if (!_Accelerator_view.is_auto_selection) {
 
 3559             _M_is_explicit_target_acclview = 
false;
 
 3560             if (_M_rv != 
NULL) {
 
 3561                 _M_is_explicit_target_acclview = 
true;
 
 3564             _M_device_resource_info = 
NULL;        
 
 3565             _M_num_resources = 0;
 
 3566             _M_num_writable_buffers = 0;
 
 3567             _M_num_samplers = 0;
 
 3569             _M_const_buffer_info = 
NULL;
 
 3570             _M_num_const_buffers = 0;
 
 3572             _M_RW_aliasing = 
false;
 
 3574             _M_shader_blob = 
NULL;
 
 3576             _M_is_flat_model = 0;
 
 3577             _M_compute_rank = 0;
 
 3578             _M_grid_extents = 
NULL;
 
 3584             _M_groupExtentX = 0;
 
 3585             _M_groupExtentY = 0;
 
 3586             _M_groupExtentZ = 0;            
 
 3591             if (_M_grid_extents) {
 
 3592                 delete [] _M_grid_extents;
 
 3598             return ((_M_aliased_buffer_set != 
nullptr) && (_M_aliased_buffer_set->find(_Buffer_ptr) != _M_aliased_buffer_set->end()));
 
 3603             return ((_M_is_device_buffer_unaccessed != 
nullptr) && _M_is_device_buffer_unaccessed->operator[](_Buffer_idx));
 
 3608             if (_M_is_device_buffer_unaccessed == 
nullptr) {
 
 3609                 _M_is_device_buffer_unaccessed = std::unique_ptr<std::vector<bool>>(
new std::vector<bool>(_M_num_resources, 
false));
 
 3612             _M_is_device_buffer_unaccessed->operator[](_Buffer_idx) = 
true;
 
 3617             if (!_M_RW_aliasing) {
 
 3621             _ASSERTE(_M_Redirect_indices != 
nullptr);
 
 3623             return _M_Redirect_indices->data();
 
 3626         void _Check_buffer_aliasing();
 
 3627         void _Update_buffer_rw_property();
 
 3628         void _Setup_aliasing_redirection_indices();
 
 3629         void _Select_accelerator_view();
 
 3630         void _Verify_buffers_against_accelerator_view();
 
 3680                                                         _Inout_ void * _ShaderBlobs) 
throw(...);
 
 3683                                                          unsigned int _ComputeRank,
 
 3684                                                          _In_ int * _Extents,
 
 3685                                                          unsigned int _GroupRank,
 
 3686                                                          const unsigned int * _GroupExtents,
 
 3687                                                          unsigned int & _GroupCountX,
 
 3688                                                          unsigned int & _GroupCountY,
 
 3689                                                          unsigned int & _GroupCountZ) 
throw(...);
 
 3727     _AMPIMP void _Write_end_event(ULONG _Span_id);
 
 3731     void _Add_accelerator_config_event(
PVOID _Accelerator_id, LPCWSTR _Device_path, LPCWSTR _Device_description);
 
 3735     void _Write_all_accelerator_config_events();
 
 3739     ULONG _Start_accelerator_view_wait_event(
PVOID _Accelerator_id, 
PVOID _Accelerator_view_id);
 
 3743     void _Launch_flush_event(
PVOID _Accelerator_id, 
PVOID _Accelerator_view_id);
 
 3747     ULONG _Launch_marker(
PVOID _Accelerator_id, 
PVOID _Accelerator_view_id);
 
 3755         std::shared_future<void> retFuture;
 
 3760         retFuture = 
std::async(std::launch::deferred, [=]() 
mutable {
 
 3763                 if (_Async_op_id == _Amp_runtime_trace::_M_event_disabled)
 
 3783             retTaskCompletionEvent.
set();
 
 3812     _AMPIMP ULONG _Launch_async_copy_event_helper(nullptr_t, 
const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy);
 
 3813     _AMPIMP ULONG _Launch_async_copy_event_helper(
const _Buffer_descriptor &_Src, nullptr_t, ULONGLONG _Num_bytes_for_copy);
 
 3823     void _Enable_provider(
bool _Enable = 
true);
 
 3836     void _Write_accelerator_config_event(
const std::tuple<PVOID, LPCWSTR, LPCWSTR> &_ConfigTuple);
 
 3839     ULONG _Start_parallel_for_each_event(
 
 3840         PVOID _Accelerator_id, 
 
 3841         PVOID _Accelerator_view_id, 
 
 3842         BOOL _Is_tiled_explicitly, 
 
 3843         ULONGLONG _Num_of_tiles, 
 
 3844         ULONG _Num_of_threads_per_tile, 
 
 3846         ULONG _Num_read_only_resources, 
 
 3847         ULONG _Num_read_write_resources, 
 
 3848         ULONGLONG _Size_of_all_resouces, 
 
 3849         ULONG _Size_of_const_data, 
 
 3850         ULONGLONG _Size_of_data_for_copy);
 
 3853     ULONG _Start_copy_event(
 
 3854         PVOID _Src_accelerator_id, 
 
 3855         PVOID _Src_accelerator_view_id,
 
 3856         PVOID _Dst_accelerator_id, 
 
 3857         PVOID _Dst_accelerator_view_id,
 
 3858         ULONGLONG _Num_bytes_for_copy,
 
 3859         BOOL _Is_src_staging,
 
 3860         BOOL _Is_dst_staging);
 
 3863     ULONG _Launch_async_copy_event(
 
 3864         PVOID _Src_accelerator_id, 
 
 3865         PVOID _Src_accelerator_view_id,
 
 3866         PVOID _Dst_accelerator_id, 
 
 3867         PVOID _Dst_accelerator_view_id,
 
 3868         ULONGLONG _Num_bytes_for_copy,
 
 3869         BOOL _Is_src_staging,
 
 3870         BOOL _Is_dst_staging);
 
 3873     _AMPIMP ULONG _Start_async_op_wait_event(ULONG _Async_op_id);
 
 3876     ULONG _Start_array_view_synchronize_event(ULONGLONG _Num_bytes_to_synchronize);
 
 3879     ULONG _Launch_array_view_synchronize_event(ULONGLONG _Num_bytes_to_synchronize);
 
 3882     std::tuple<PVOID, PVOID, BOOL> _Get_resource_diagnostic_info(
const _Buffer_descriptor &_Buff_desc, accelerator_view _Accl_view) 
const;
 
 3885     std::tuple<PVOID, PVOID, BOOL> _Get_resource_diagnostic_info(
const _Texture_descriptor &_Tex_desc) 
const;
 
 3888     ULONG _Get_unique_identifier();
 
 3905     static const ULONG _M_event_disabled = 0;
 
unsigned int * _M_view_extent
Definition: amprt.h:1878
 
#define _Out_
Definition: sal.h:342
 
unsigned int _M_linear_offset
Definition: amprt.h:1875
 
_AMPIMP bool __cdecl is_timeout_disabled(const accelerator_view &_Accelerator_view)
Returns a boolean flag indicating if timeout is disabled for the specified accelerator_view. This corresponds to the D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT flag for Direct3D device creation. 
 
_Event _Copy_impl(_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, OutputIterator _DestIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3060
 
unsigned int _M_view_mipmap_levels
Definition: amprt.h:575
 
static bool _Compare_extent(unsigned int _Rank, const unsigned int *_Extent1, const unsigned int *_Extent2)
Definition: amprt.h:1668
 
_AMPIMP scoped_d3d_access_lock & operator=(scoped_d3d_access_lock &&_Other)
Move assignment operator for scoped_d3d_access_lock: Take ownership of a lock from another scoped_d3d...
 
completion_future & operator=(const completion_future &_Other)
Copy assignment operator 
Definition: amprt.h:1306
 
_Sampler_descriptor & operator=(const _Sampler_descriptor &_Other) __GPU
Definition: amprt.h:758
 
size_t _Get_elem_size() const 
Definition: amprt.h:2024
 
size_t _Get_master_buffer_elem_size() const 
Definition: amprt.h:3236
 
size_t _M_num_writable_buffers
Definition: amprt.h:3523
 
_CONCRTIMP void __cdecl wait(unsigned int _Milliseconds)
Pauses the current context for a specified amount of time. 
 
unsigned int _Get_bits_per_element() const 
Definition: amprt.h:2251
 
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:308
 
std::vector< unsigned int > _Copy_extents
Definition: amprt.h:2990
 
size_t _M_rc
Definition: amprt.h:162
 
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2369
 
size_t _M_depth
Definition: amprt.h:2358
 
_Reference_counted_obj_ptr(const _Reference_counted_obj_ptr &_Other)
Definition: amprt.h:179
 
#define NULL
Definition: vcruntime.h:236
 
T * _M_obj_ptr
Definition: amprt.h:254
 
friend class _D3D_accelerator_view_impl
Definition: amprt.h:323
 
const _View_info * _Get_view_info_ptr(_In_ const _View_key key) const 
Definition: amprt.h:3328
 
_Ret_ void * _Get_data_ptr() const 
Definition: amprt.h:2012
 
void get() const 
Returns the result this task produced. If the task is not in a terminal state, a call to get will wai...
Definition: ppltasks.h:4183
 
_Accelerator_view_impl_ptr _M_master_av
Definition: amprt.h:3340
 
size_t operator()(const accelerator_view &_Accl_view) const 
Definition: amprt.h:1571
 
_Short_vector_base_type_id
Definition: amprt.h:291
 
static _AMPIMP const wchar_t direct3d_ref[]
String constant for direct3d reference accelerator 
Definition: amprt.h:1045
 
_AMPIMP ~_Event()
Destructor of the _Event. 
 
completion_future()
Default constructor 
Definition: amprt.h:1274
 
const unsigned int * _Get_base_extent() const 
Definition: amprt.h:1601
 
std::unique_ptr< std::vector< bool > > _M_is_device_buffer_unaccessed
Definition: amprt.h:3634
 
_DPC_shader_blob * _M_shader_blob
Definition: amprt.h:3533
 
#define _Out_opt_
Definition: sal.h:343
 
void _Get_preferred_copy_chunk_extent(unsigned int _Rank, size_t _Width, size_t _Height, size_t _Depth, size_t _Bits_per_element, _Out_writes_(3) size_t *_Preferred_copy_chunk_extent)
Definition: amprt.h:2619
 
#define S_OK
Definition: comutil.h:62
 
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:725
 
std::future_status wait_for(const std::chrono::duration< _Rep, _Period > &_Rel_time) const 
Blocks until the associated asynchronous operation completes or _Rel_time has elapsed ...
Definition: amprt.h:1370
 
~_DPC_call_handle()
Definition: amprt.h:3589
 
void * _M_data
Definition: amprt.h:3501
 
unsigned int _Get_texture_format() const 
Definition: amprt.h:2230
 
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task 
 
_AMPIMP _Access_mode __cdecl _Get_recommended_buffer_host_access_mode(const accelerator_view &_Av)
 
const size_t ERROR_MSG_BUFFER_SIZE
Definition: amprt.h:118
 
std::shared_future< void > _M_shared_future
Definition: amprt.h:1435
 
__declspec(property(get=get_version)) unsigned int version
 
unsigned int _Get_mip_levels() const 
Definition: amprt.h:2306
 
size_t _M_master_buffer_num_elems
Definition: amprt.h:3350
 
void _Copy_data_on_host_src_iter(int _Rank, _Input_iterator _Src, _Out_ _Value_type *_Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Dst_row_pitch_in_bytes, size_t _Dst_depth_pitch_in_bytes, size_t _Src_row_pitch, size_t _Src_depth_pitch)
Definition: amprt.h:2489
 
const unsigned int * _Get_view_offset() const 
Definition: amprt.h:1606
 
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
 
unsigned int _Rank
Definition: amprt.h:2977
 
unsigned int _M_groupExtentY
Definition: amprt.h:3547
 
accelerator_view _M_accelerator_view
Definition: amprt.h:3645
 
bool _M_RW_aliasing
Definition: amprt.h:3530
 
unsigned int _Get_view_format() const 
Definition: amprt.h:2235
 
bool _Subsumes(const _View_shape *_Other) const 
Definition: amprt.h:1735
 
_AMPIMP accelerator()
Construct a accelerator representing the default accelerator 
 
_AMPIMP void HELPERAPI __dpc_set_kernel_shader_info(_In_ _DPC_call_handle *_Handle, _Inout_ void *_ShaderBlobs)
 
size_t _M_row_pitch
Definition: amprt.h:2365
 
details::_Reference_counted_obj_ptr< details::_Texture > _Texture_ptr
Definition: amprt.h:309
 
_AMPIMP const wchar_t * _Get_description() const 
 
unsigned int * _M_base_extent
Definition: amprt.h:1876
 
bool _Get_chunked_staging_texture(_In_ _Texture *_Tex, const size_t *_Copy_chunk_extent, _Inout_ size_t *_Remaining_copy_extent, _Out_ size_t *_Curr_copy_extent, _Out_ _Texture_ptr *_Staging_texture)
Definition: amprt.h:2443
 
std::wstring get_description() const 
Returns the device description as a std::wstring 
Definition: amprt.h:1140
 
_Ret_ _Accelerator_view_impl * _Get_accelerator_view_impl() const 
Definition: amprt.h:2034
 
_AMPIMP _Ret_ IUnknown *__cdecl get_device(const accelerator_view &_Av)
Get the D3D device interface underlying a accelerator_view. 
 
unsigned int _Get_address_mode() const 
Definition: amprt.h:2402
 
_Buffer_descriptor & operator=(const _Buffer_descriptor &_Other) __GPU
Definition: amprt.h:490
 
std::vector< unsigned int > _Dst_copy_offset
Definition: amprt.h:2987
 
constexpr _Ty & get(array< _Ty, _Size > &_Arr) _NOEXCEPT
Definition: array:493
 
unsigned int _M_groupExtentZ
Definition: amprt.h:3548
 
static _AMPIMP const wchar_t direct3d_warp[]
String constant for direct3d WARP accelerator 
Definition: amprt.h:1040
 
#define __GPU
Definition: amprt.h:45
 
_AMPIMP accelerator_view __cdecl create_accelerator_view(_In_ IUnknown *_D3D_device, queuing_mode _Qmode=queuing_mode_automatic)
Create a accelerator_view from a D3D device interface pointer. 
 
future< result_of_t< decay_t< _Fty >decay_t< _ArgTypes >...)> > async(launch _Policy, _Fty &&_Fnarg, _ArgTypes &&..._Args)
Definition: future:1919
 
size_t _Get_row_pitch() const 
Definition: amprt.h:2311
 
_Reference_counted_obj_ptr & operator=(const _Reference_counted_obj_ptr &_Other)
Definition: amprt.h:200
 
_Buffer_descriptor(_In_ void *_Data_ptr, _In_ _Ubiquitous_buffer *_Buffer_ptr, _Access_mode _Curr_cpu_access_mode, _Access_mode _Type_mode) __GPU
Definition: amprt.h:467
 
void * _M_data_ptr
Definition: amprt.h:2427
 
std::future_status wait_until(const std::chrono::time_point< _Clock, _Duration > &_Abs_time) const 
Blocks until the associated asynchronous operation completes or until the current time exceeds _Abs_t...
Definition: amprt.h:1385
 
void _Copy_data_on_host_dst_iter(int _Rank, const _Value_type *_Src, _Output_iterator _Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Src_row_pitch_in_bytes, size_t _Src_depth_pitch_in_bytes, size_t _Dst_row_pitch, size_t _Dst_depth_pitch)
Definition: amprt.h:2553
 
_In_ int _Val
Definition: vcruntime_string.h:62
 
_Ret_ void * _Get_resource_ptr() const 
Definition: amprt.h:3477
 
Class represents a virtual device abstraction on a C++ AMP data-parallel accelerator ...
Definition: amprt.h:1442
 
_AMPIMP bool _Is_finished()
Poll whether the _Event has completed or not and throws any exceptions that occur ...
 
RAII wrapper for a D3D access lock on an accelerator_view. 
Definition: amprt.h:945
 
_AMPIMP std::pair< accelerator_view, accelerator_view > __cdecl _Get_src_dest_accelerator_view(_In_opt_ const _Buffer_descriptor *_SrcBuffDescPtr, _In_opt_ const _Buffer_descriptor *_DestBuffDescPtr)
 
_AMPIMP bool __cdecl d3d_access_try_lock(accelerator_view &_Av)
Attempt to acquire the D3D access lock on an accelerator_view without blocking. 
 
void _Init()
Definition: amprt.h:256
 
_Ret_ _Accelerator_view_impl * _Get_accelerator_view_impl_ptr(const accelerator_view &_Accl_view)
Definition: amprt.h:1556
 
std::vector< unsigned int > _Src_extents
Definition: amprt.h:2981
 
_Ubiquitous_buffer * _M_buffer_ptr
Definition: amprt.h:445
 
bool _M_owns_data
Definition: amprt.h:2137
 
details::_Reference_counted_obj_ptr< details::_View_shape > _View_shape_ptr
Definition: amprt.h:313
 
bool _Is_buffer_unaccessed(size_t const _Buffer_idx) const 
Definition: amprt.h:3601
 
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:43
 
Class represents a future corresponding to a C++ AMP asynchronous operation 
Definition: amprt.h:1266
 
_Ret_ _Texture_descriptor * _Get_texture_desc() const 
Definition: amprt.h:3465
 
std::wstring get_device_path() const 
Returns the system-wide unique device instance path as a std::wstring 
Definition: amprt.h:1124
 
bool _Is_valid_access_mode(_Access_mode _Mode)
Definition: amprt.h:417
 
void _Set_buffer_ptr(_In_opt_ _Ubiquitous_buffer *) __GPU_ONLY
Definition: amprt.h:525
 
size_t _Get_depth(unsigned int _Mip_offset=0) const 
Definition: amprt.h:2220
 
std::unordered_set< _View_info * > _M_view_info_set
Definition: amprt.h:3371
 
_Texture_descriptor(_Texture_descriptor &&_Other) __CPU_ONLY
Definition: amprt.h:640
 
void _Set_texture_ptr(_In_opt_ _Texture *) __GPU_ONLY
Definition: amprt.h:691
 
_AMPIMP _Event __cdecl _Copy_async_impl(_In_ _Texture *_Src_tex, const size_t *_Src_offset, unsigned int _Src_mipmap_level, _Out_ _Texture *_Dst_tex, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level, const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent=NULL)
 
_AMPIMP bool operator==(const _Event &_Other) const 
Return true if the other _Event is same as this _Event; false otherwise 
 
accelerator(const std::wstring &_Device_path)
Construct a accelerator representing the accelerator with the specified device instance path ...
Definition: amprt.h:1056
 
Tag type to indicate the D3D access lock should be adopted rather than acquired. 
Definition: amprt.h:940
 
const bool * _Get_projection_info() const 
Definition: amprt.h:1615
 
void _Set_depth_pitch(size_t _Val)
Definition: amprt.h:2326
 
unsigned int _M_is_debug_data
Definition: amprt.h:3506
 
_AMPIMP scoped_d3d_access_lock(accelerator_view &_Av)
Acquire a D3D access lock on the given accelerator_view. The lock is released when this object goes o...
 
_Texture_descriptor(_In_ _Texture *_Texture_ptr) __CPU_ONLY
Definition: amprt.h:594
 
details::_Reference_counted_obj_ptr< details::_Accelerator_impl > _Accelerator_impl_ptr
Definition: amprt.h:307
 
_AMPIMP bool get_supports_limited_double_precision() const 
Returns a boolean value indicating whether the accelerator has limited double precision support (excl...
 
_AMPIMP bool _Is_empty() const 
Tells if this is an empty event 
 
_Ret_ T * operator->() const 
Definition: amprt.h:233
 
bool _M_has_data_source
Definition: amprt.h:3353
 
unsigned int _Get_num_channels() const 
Definition: amprt.h:2240
 
bool _Owns_data() const 
Definition: amprt.h:2044
 
_AMPIMP bool __cdecl _Set_default_accelerator(_Accelerator_impl_ptr _Accl)
 
_AMPIMP _Ret_ _DPC_call_handle *HELPERAPI __dpc_create_call_handle(_In_ _Host_Scheduling_info *_Sch_info)
 
access_type _Get_cpu_access_type(_Access_mode _Cpu_access_mode)
Definition: amprt.h:1945
 
bool operator==(const _Sampler_descriptor &_Other) const __GPU
Definition: amprt.h:775
 
_AMPIMP bool __cdecl _Is_D3D_accelerator_view(const accelerator_view &_Av)
 
_AMPIMP bool get_supports_cpu_shared_memory() const 
Returns a boolean value indicating whether the accelerator supports memory accessible both by the acc...
 
_AMPIMP void _Get()
Wait until the _Event completes and throw any exceptions that occur. 
 
details::_Reference_counted_obj_ptr< details::_Accelerator_view_impl > _Accelerator_view_impl_ptr
Definition: amprt.h:306
 
unsigned int _M_rank
Definition: amprt.h:2355
 
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace()
 
BOOL _Is_sampler() const 
Definition: amprt.h:3454
 
bool _Is_adopted() const 
Definition: amprt.h:2081
 
enum _Short_vector_base_type_id _Texture_base_type_id
Definition: amprt.h:302
 
basic_ostream< _Elem, _Traits > &__CLRCALL_OR_CDECL flush(basic_ostream< _Elem, _Traits > &_Ostr)
Definition: ostream:1009
 
The task_completion_event class allows you to delay the execution of a task until a condition is sati...
Definition: ppltasks.h:2625
 
_AMPIMP bool get_has_display() const 
Returns a boolean value indicating whether the accelerator is attached to a display ...
 
_Sampler_descriptor(const _Sampler_descriptor &_Other) __GPU
Definition: amprt.h:751
 
size_t _Get_reference_count()
Definition: amprt.h:156
 
void _Set_buffer_ptr(_In_opt_ _Ubiquitous_buffer *_Buffer_ptr) __CPU_ONLY
Definition: amprt.h:508
 
size_t _M_num_resources
Definition: amprt.h:3522
 
_AMPIMP accelerator & operator=(const accelerator &_Other)
Assignment operator 
 
bool _Compare_base_shape(const _View_shape *_Other) const 
Definition: amprt.h:1834
 
bool _Is_host_accessible(_Access_mode _Requested_access_mode) const 
Definition: amprt.h:2066
 
unsigned int _Get_most_detailed_mipmap_level() const __GPU
Definition: amprt.h:659
 
#define UINT_MAX
Definition: limits.h:36
 
A non-reentrant mutex which is explicitly aware of the Concurrency Runtime. 
Definition: concrt.h:3488
 
unsigned int _M_groupCountZ
Definition: amprt.h:3543
 
const unsigned int * _Get_view_extent() const 
Definition: amprt.h:1610
 
_T _Greatest_common_divisor(_T _M, _T _N)
Definition: amprt.h:2658
 
bool _Is_buffer_aliased(_In_ void *const _Buffer_ptr) const 
Definition: amprt.h:3596
 
_AMPIMP bool get_is_emulated() const 
Returns a boolean value indicating whether the accelerator is emulated. This is true, for example, with the direct3d reference and WARP accelerators. 
 
_Accelerator_view_impl_ptr _M_access_on_accelerator_view
Definition: amprt.h:2130
 
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator 
Definition: amprt.h:1035
 
_Trace * _M_trace_ptr
Definition: amprt.h:3902
 
typedef PVOID(NTAPI *ResolveDelayLoadedAPIProc)(_In_ PVOID ParentModuleBase
 
_Iter_diff_t< _InIt > distance(_InIt _First, _InIt _Last)
Definition: xutility:1124
 
_Event _Copy_impl_iter(_InputIterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3109
 
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev 
 
unsigned int _M_bits_per_channel
Definition: amprt.h:2361
 
void then(const _Functor &_Func) const 
Chains a callback Functor to the completion_future to be executed when the associated asynchronous op...
Definition: amprt.h:1408
 
unsigned int _M_filter_mode
Definition: amprt.h:2429
 
_Ret_ T * _Get_ptr() const 
Definition: amprt.h:248
 
_AMPIMP ~scoped_d3d_access_lock()
Destructor for scoped_d3d_access_lock: unlock the accelerator_view. 
 
unsigned int _M_mip_levels
Definition: amprt.h:2363
 
int i[4]
Definition: dvec.h:68
 
_Buffer_descriptor() __GPU
Definition: amprt.h:461
 
void _Set_row_pitch(size_t _Val)
Definition: amprt.h:2316
 
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:311
 
static std::vector< accelerator > get_all()
Returns the vector of accelerator objects representing all available accelerators ...
Definition: amprt.h:1082
 
bool _Contains(const unsigned int *_Element_index) const 
Definition: amprt.h:1845
 
_Texture_descriptor(const _Texture_descriptor &_Other) __GPU
Definition: amprt.h:610
 
completion_future(completion_future &&_Other)
Move constructor 
Definition: amprt.h:1290
 
_AMPIMP _Ret_ _Accelerator_impl_ptr *__cdecl _Get_devices()
 
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:450
 
unsigned int _Get_view_mipmap_levels() const __GPU
Definition: amprt.h:664
 
#define _In_
Definition: sal.h:305
 
_AMPIMP void __cdecl amp_uninitialize()
Uninitializes the C++ AMP runtime. It is legal to call this function multiple times during an applica...
 
volatile ULONG _M_counter
Definition: amprt.h:3899
 
#define _Inout_opt_
Definition: sal.h:376
 
size_t _Get_num_elems() const 
Definition: amprt.h:2029
 
std::unique_ptr< std::unordered_set< _View_key > > _M_view_keys
Definition: amprt.h:2147
 
bool _M_is_temp
Definition: amprt.h:2141
 
_Accelerator_view_impl_ptr _M_accelerator_view
Definition: amprt.h:2129
 
#define _In_opt_
Definition: sal.h:306
 
_Event_impl_ptr _M_ptr_event_impl
Definition: amprt.h:407
 
_Sampler_descriptor(_Sampler_descriptor &&_Other) __CPU_ONLY
Definition: amprt.h:770
 
#define FAILED(hr)
Definition: comutil.h:71
 
void _Set_sampler_ptr(_In_opt_ _Sampler *) __GPU_ONLY
Definition: amprt.h:802
 
_Ret_ void * _Get_data_ptr() const 
Definition: amprt.h:2386
 
bool _Overlaps(const _View_shape *_Other) const 
Definition: amprt.h:1704
 
std::vector< unsigned int > _Src_copy_offset
Definition: amprt.h:2982
 
unsigned int _Get_view_size() const 
Definition: amprt.h:1636
 
unsigned int _Dst_linear_offset
Definition: amprt.h:2985
 
struct Concurrency::details::_Sampler_descriptor _Sampler_descriptor
 
_AMPIMP _View_shape_ptr _Get_view_shape(_In_ _View_key _Key)
 
_Reference_counted_obj_ptr & operator=(_Reference_counted_obj_ptr &&_Other)
Definition: amprt.h:216
 
_Ret_ _Accelerator_view_impl * _Get_access_on_accelerator_view_impl() const 
Definition: amprt.h:2039
 
#define __CPU_ONLY
Definition: amprt.h:47
 
IUnknown * _M_data_ptr
Definition: amprt.h:558
 
BOOL _Is_buffer() const 
Definition: amprt.h:3444
 
_Accelerator_view_impl_ptr _M_impl
Definition: amprt.h:1550
 
const float * _Get_border_color() const 
Definition: amprt.h:2407
 
static _AMPIMP const wchar_t default_accelerator[]
String constant for default accelerator 
Definition: amprt.h:1030
 
bool _M_is_adopted
Definition: amprt.h:2428
 
bool _M_is_staging
Definition: amprt.h:2138
 
_Ret_ _Buffer_descriptor * _Get_buffer_desc() const 
Definition: amprt.h:3459
 
size_t _M_height
Definition: amprt.h:2357
 
_Array_copy_desc(const unsigned int _Rank, const unsigned int _Src_linear_offset, const unsigned int *_Src_extents, const unsigned int *_Src_copy_offset, const unsigned int _Dst_linear_offset, const unsigned int *_Dst_extents, const unsigned int *_Dst_copy_offset, const unsigned int *_Copy_extents)
Definition: amprt.h:2952
 
~_Reference_counted_obj_ptr()
Definition: amprt.h:192
 
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt_exceptions.h:29
 
std::unordered_map< _View_key, _View_info * > _M_view_map
Definition: amprt.h:3362
 
bool _Is_buffer() const 
Definition: amprt.h:2087
 
_Buffer_descriptor * _View_key
Definition: amprt.h:410
 
void wait() const 
Blocks until the associated asynchronous operation completes 
Definition: amprt.h:1355
 
_AMPIMP size_t get_dedicated_memory() const 
Get the dedicated memory for this accelerator in KB 
 
struct Concurrency::details::_Buffer_descriptor _Buffer_descriptor
 
_AMPIMP accelerator_view get_default_view() const 
Return the default accelerator view associated with this accelerator 
 
_Ret_ void * _Get_host_ptr() const 
Definition: amprt.h:2019
 
_AMPIMP accelerator __cdecl _Select_default_accelerator()
 
unsigned int _Get_linear_offset() const 
Definition: amprt.h:1596
 
_AMPIMP void __cdecl d3d_access_lock(accelerator_view &_Av)
Acquire a lock on an accelerator_view for the purpose of safely performing D3D operations on resource...
 
unsigned int _M_compute_rank
Definition: amprt.h:3537
 
The Parallel Patterns Library (PPL) task class. A task object represents work that can be executed as...
Definition: ppltasks.h:3898
 
void swap(array< _Ty, _Size > &_Left, array< _Ty, _Size > &_Right) _NOEXCEPT_OP(_NOEXCEPT_OP(_Left.swap(_Right)))
Definition: array:433
 
std::unordered_set< accelerator_view, _Accelerator_view_hasher > _Accelerator_view_unordered_set
Definition: amprt.h:1578
 
#define HELPERAPI
Definition: amprt.h:3416
 
bool _Are_mipmap_levels_overlapping(const _Texture_descriptor *_Other) const __CPU_ONLY
Definition: amprt.h:699
 
size_t _Get_master_buffer_num_elems() const 
Definition: amprt.h:3241
 
_AMPIMP bool set_default_cpu_access_type(access_type _Default_cpu_access_type)
Set the default cpu access_type for arrays created on this accelerator or for implicit memory allocat...
 
std::map< _Accelerator_view_impl_ptr, _Buffer_ptr > _M_buffer_map
Definition: amprt.h:3358
 
_AMPIMP void _Get_CPU_access(_Access_mode _Requested_mode) const 
 
_Access_mode _M_current_host_access_mode
Definition: amprt.h:2134
 
_Accelerator_impl_ptr _M_impl
Definition: amprt.h:1260
 
details::_Reference_counted_obj_ptr< details::_Event_impl > _Event_impl_ptr
Definition: amprt.h:312
 
unsigned int _Get_data_length(unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels, const size_t *_Extents=nullptr) const 
Definition: amprt.h:2256
 
size_t _M_actual_arg_num
Definition: amprt.h:3442
 
_Ret_ _Ubiquitous_buffer * _Get_buffer(const _Array_type &_Array) __CPU_ONLY
Definition: xxamp.h:1069
 
size_t _M_width
Definition: amprt.h:2356
 
unsigned int _M_groupCountX
Definition: amprt.h:3541
 
_Reference_counted_obj_ptr(_Reference_counted_obj_ptr &&_Other)
Definition: amprt.h:185
 
_Sampler_descriptor() __GPU
Definition: amprt.h:733
 
_Ret_ _Sampler * _Get_sampler_ptr() const __CPU_ONLY
Definition: amprt.h:780
 
_Reference_counter()
Definition: amprt.h:126
 
unsigned int _Get_mipmap_levels(const _Texture *_Tex)
Definition: amprt.h:3909
 
_Sampler_descriptor(_In_ _Sampler *_Sampler_ptr) __GPU
Definition: amprt.h:738
 
#define false
Definition: stdbool.h:16
 
_AMPIMP ULONG _Start_async_op_wait_event(ULONG _Async_op_id)
 
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
 
_Access_mode _M_type_access_mode
Definition: amprt.h:455
 
void _Set_data_ptr(_In_ IUnknown *_Data_ptr)
Definition: amprt.h:2123
 
_AMPIMP access_type get_default_cpu_access_type() const 
Get the default cpu access_type for buffers created on this accelerator 
 
unsigned int _M_rank
Definition: amprt.h:1874
 
_AMPIMP void __cdecl d3d_access_unlock(accelerator_view &_Av)
Release the D3D access lock on the given accelerator_view. If the calling thread does not hold the lo...
 
bool _Is_valid() const 
Definition: amprt.h:1810
 
unsigned int _M_groupExtentX
Definition: amprt.h:3546
 
void * _M_data_ptr
Definition: amprt.h:720
 
Concurrency::critical_section _M_critical_section
Definition: amprt.h:2148
 
friend _Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
 
_AMPIMP void _Init(const wchar_t *_Path)
 
unsigned int _Get_rank() const 
Definition: amprt.h:2225
 
size_t _Get_depth_pitch() const 
Definition: amprt.h:2321
 
~_Texture_descriptor() __GPU
Definition: amprt.h:604
 
completion_future & operator=(completion_future &&_Other)
Move assignment operator 
Definition: amprt.h:1319
 
unsigned int * _M_view_offset
Definition: amprt.h:1877
 
~_Buffer_descriptor() __GPU
Definition: amprt.h:476
 
_Device_resource_info * _M_device_resource_info
Definition: amprt.h:3521
 
_AMPIMP size_t __cdecl _Get_preferred_copy_chunk_size(size_t _Total_copy_size_in_bytes)
 
void * _M_data_ptr
Definition: amprt.h:438
 
bool _Is_projection() const 
Definition: amprt.h:1620
 
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3395
 
#define _AMPIMP
Definition: amprt_exceptions.h:20
 
unsigned int _Get_linear_offset(const unsigned int *_Element_index) const 
Definition: amprt.h:1859
 
std::vector< std::tuple< PVOID, LPCWSTR, LPCWSTR > > _M_accelerator_configs
Definition: amprt.h:3896
 
_AMPIMP void HELPERAPI __dpc_dispatch_kernel(_In_ _DPC_call_handle *_Handle)
 
_Access_mode
Definition: amprt.h:88
 
_AMPIMP _Event & operator=(const _Event &_Other)
Assignment operator 
 
static bool set_default(const std::wstring &_Path)
Sets the default accelerator to be used for any operation that implicitly uses the default accelerato...
Definition: amprt.h:1105
 
basic_string< wchar_t, char_traits< wchar_t >, allocator< wchar_t > > wstring
Definition: xstring:2636
 
bool operator==(const _Texture_descriptor &_Other) const __GPU
Definition: amprt.h:645
 
_AMPIMP void HELPERAPI __dpc_set_const_buffer_info(_In_ _DPC_call_handle *_Handle, _In_ _Device_const_buffer_info *_DeviceConstBufferInfo, size_t _NumConstBuffers)
 
bool _Is_adopted() const 
Definition: amprt.h:2391
 
std::vector< unsigned int > _Dst_extents
Definition: amprt.h:2986
 
size_t _Get_height(unsigned int _Mip_offset=0) const 
Definition: amprt.h:2215
 
void _Set_view_mipmap_levels(unsigned int _View_mipmap_levels) __CPU_ONLY
Definition: amprt.h:669
 
unsigned int _M_texture_format
Definition: amprt.h:2359
 
_AMPIMP void HELPERAPI __dpc_set_device_resource_info(_In_ _DPC_call_handle *_Handle, _In_ _Device_resource_info *_DeviceResourceInfo, size_t _NumResources)
 
_Array_copy_desc()
Definition: amprt.h:2975
 
_Device_const_buffer_info * _M_const_buffer_info
Definition: amprt.h:3527
 
_DPC_call_handle(const accelerator_view &_Accelerator_view)
Definition: amprt.h:3550
 
void _Set_texture_ptr(_In_opt_ _Texture *_Texture_ptr) __CPU_ONLY
Definition: amprt.h:674
 
__declspec(property(get=get_device_path)) std _AMPIMP unsigned int get_version() const 
Get the version for this accelerator 
 
concurrency::task< void > to_task() const 
Returns a concurrency::task object corresponding to the associated asynchronous operation ...
Definition: amprt.h:1421
 
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:1089
 
future_status
Definition: future:97
 
static unsigned int _Get_extent_size(unsigned int _Rank, const unsigned int *_Extent)
Definition: amprt.h:1800
 
T & operator*() const 
Definition: amprt.h:238
 
#define _Out_writes_(size)
Definition: sal.h:345
 
static bool _Intervals_overlap(size_t _First_start, size_t _First_end, size_t _Second_start, size_t _Second_end)
Definition: amprt.h:1782
 
unsigned int _Get_view_linear_offset() const 
Definition: amprt.h:1641
 
access_type _Get_allowed_host_access_type() const 
Definition: amprt.h:2061
 
_AMPIMP _Event()
Constructor of the _Event. 
 
int _M_is_flat_model
Definition: amprt.h:3536
 
unsigned int _M_groupCountY
Definition: amprt.h:3542
 
_AMPIMP void HELPERAPI __dpc_release_call_handle(_In_ _DPC_call_handle *_Handle)
 
unsigned int _M_num_channels
Definition: amprt.h:2362
 
queuing_mode
Queuing modes supported for accelerator views 
Definition: amprt.h:830
 
_Ret_ _Sampler_descriptor * _Get_sampler_desc() const 
Definition: amprt.h:3471
 
unsigned int _Src_linear_offset
Definition: amprt.h:2980
 
Definition: type_traits:92
 
_Resource_kind
Definition: amprt.h:3423
 
_Buffer_descriptor(const _Buffer_descriptor &_Other) __GPU
Definition: amprt.h:482
 
unsigned int _M_most_detailed_mipmap_level
Definition: amprt.h:569
 
bool _Is_array() const 
Definition: amprt.h:532
 
details::_Reference_counted_obj_ptr< details::_Sampler > _Sampler_ptr
Definition: amprt.h:310
 
size_t _M_const_buf_size
Definition: amprt.h:3504
 
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1885
 
size_t _M_num_samplers
Definition: amprt.h:3524
 
_Accelerator_view_impl * _M_rv
Definition: amprt.h:3517
 
bool valid() const 
Returns true if the object is associated with an asynchronous operation 
Definition: amprt.h:1347
 
~_Sampler_descriptor() __GPU
Definition: amprt.h:745
 
_Accelerator_view_impl_ptr _M_impl
Definition: amprt.h:1006
 
_Ret_ _Accelerator_impl * _Get_accelerator_impl_ptr(const accelerator &_Accl)
Definition: amprt.h:1561
 
constexpr remove_reference< _Ty >::type && move(_Ty &&_Arg) _NOEXCEPT
Definition: type_traits:1290
 
friend class accelerator_view
Definition: amprt.h:1015
 
_AMPIMP const wchar_t * _Get_device_path() const 
 
_AMPIMP void HELPERAPI __dpc_set_kernel_dispatch_info(_In_ _DPC_call_handle *_Handle, unsigned int _ComputeRank, _In_ int *_Extents, unsigned int _GroupRank, const unsigned int *_GroupExtents, unsigned int &_GroupCountX, unsigned int &_GroupCountY, unsigned int &_GroupCountZ)
 
#define _Inout_
Definition: sal.h:375
 
Concurrency::critical_section _M_critical_section
Definition: amprt.h:3374
 
virtual ~_Reference_counter() noexcept(false)
Definition: amprt.h:129
 
exception_ptr current_exception() _NOEXCEPT
Definition: exception:359
 
_AMPIMP bool get_supports_double_precision() const 
Returns a boolean value indicating whether the accelerator supports full double precision (including ...
 
static _AMPIMP accelerator_view __cdecl get_auto_selection_view()
Returns the auto selection accelerator_view which when specified as the parallel_for_each target resu...
 
size_t _M_depth_pitch
Definition: amprt.h:2366
 
_Ret_ _Texture * _Get_texture_ptr() const __CPU_ONLY
Definition: amprt.h:653
 
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
 
std::enable_if<!std::is_base_of< std::input_iterator_tag, typename std::iterator_traits< _OutputIterator >::iterator_category >::value >::type _Advance_output_iterator(_OutputIterator &_Iter, size_t _N)
Definition: amprt.h:2817
 
static void _UnInitialize(_In_ T *_Obj_ptr)
Definition: amprt.h:264
 
bool _M_is_explicit_target_acclview
Definition: amprt.h:3518
 
_Access_mode _Get_synchronize_access_mode(access_type cpu_access_type)
Definition: amprt.h:1927
 
const _View_key _Get_view_key() const 
Definition: amprt.h:542
 
_Resource_kind _M_resource_kind
Definition: amprt.h:3432
 
_Texture_descriptor(unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __GPU
Definition: amprt.h:588
 
Definition: type_traits:950
 
completion_future(const completion_future &_Other)
Copy constructor 
Definition: amprt.h:1281
 
bool _Is_temp() const 
Definition: amprt.h:2076
 
size_t _M_num_const_buffers
Definition: amprt.h:3528
 
size_t _M_num_elems
Definition: amprt.h:2136
 
bool _M_is_adopted
Definition: amprt.h:2143
 
~completion_future()
Destructor 
Definition: amprt.h:1299
 
Class represents an accelerator abstraction for C++ AMP data-parallel devices 
Definition: amprt.h:1013
 
_Access_mode _Get_current_host_access_mode() const 
Definition: amprt.h:2071
 
_Texture_descriptor() __GPU
Definition: amprt.h:581
 
BOOL _Is_texture() const 
Definition: amprt.h:3449
 
_Buffer * _M_master_buffer
Definition: amprt.h:3344
 
size_t _Remove_reference()
Definition: amprt.h:140
 
_Texture * _M_texture_ptr
Definition: amprt.h:565
 
void _Set_host_ptr(_In_ void *_Host_ptr, _Access_mode _Host_access_mode=_No_access)
Definition: amprt.h:2110
 
std::unique_ptr< std::vector< int > > _M_Redirect_indices
Definition: amprt.h:3636
 
_AMPIMP size_t __cdecl _Get_num_devices()
 
size_t _Add_reference()
Definition: amprt.h:133
 
std::unique_ptr< std::unordered_set< void * > > _M_aliased_buffer_set
Definition: amprt.h:3633
 
_Texture_descriptor & operator=(const _Texture_descriptor &_Other) __GPU
Definition: amprt.h:626
 
Concurrency::critical_section _M_critical_section
Definition: amprt.h:3893
 
size_t _M_elem_size
Definition: amprt.h:2135
 
bool _Has_data_source() const 
Definition: amprt.h:3246
 
void _Set_sampler_ptr(_In_opt_ _Sampler *_Sampler_ptr) __CPU_ONLY
Definition: amprt.h:785
 
_AMPIMP accelerator_view create_view(queuing_mode qmode=queuing_mode_automatic)
Create and return a new accelerator view on this accelerator with the specified queuing mode...
 
#define __GPU_ONLY
Definition: amprt.h:46
 
bool _M_is_buffer
Definition: amprt.h:2144
 
_AMPIMP HRESULT __cdecl _Recursive_array_copy(const _Array_copy_desc &_Desc, unsigned int _Native_copy_rank, std::function< HRESULT(const _Array_copy_desc &_Reduced)> _Native_copy_func)
 
size_t _Get_width(unsigned int _Mip_offset=0) const 
Definition: amprt.h:2210
 
bool _Is_staging() const 
Definition: amprt.h:2051
 
bool set() const 
Sets the task completion event. 
Definition: ppltasks.h:2641
 
bool _Is_view_linear(unsigned int &_Linear_offset, unsigned int &_Linear_size) const 
Definition: amprt.h:1679
 
_Sampler * _M_sampler_ptr
Definition: amprt.h:727
 
concurrency::task< void > _M_task
Definition: amprt.h:1436
 
bool _Is_cpu_accelerator(const accelerator &_Accl)
Definition: amprt.h:3401
 
bool _Is_valid(size_t _Buffer_size) const 
Definition: amprt.h:1625
 
struct Concurrency::details::_Texture_descriptor _Texture_descriptor
 
void * _M_host_ptr
Definition: amprt.h:2132
 
unsigned int _Get_filter_mode() const 
Definition: amprt.h:2397
 
unsigned int _M_address_mode
Definition: amprt.h:2430
 
_Access_mode _M_formal_access_mode
Definition: amprt.h:3437
 
_Reference_counted_obj_ptr(T *_Ptr=NULL)
Definition: amprt.h:173
 
unsigned int _Get_bits_per_channel() const 
Definition: amprt.h:2245
 
static bool _Compare_extent_with_elem_size(unsigned int _Rank, const unsigned int *_Extent1, size_t _Elem_size1, const unsigned int *_Extent2, size_t _Elem_size2)
Definition: amprt.h:1647
 
void * _M_desc
Definition: amprt.h:3434
 
unsigned int _M_view_format
Definition: amprt.h:2360
 
const int * _Get_redirect_indices() const 
Definition: amprt.h:3615
 
_Access_mode _M_allowed_host_access_mode
Definition: amprt.h:2133
 
_AMPIMP ~accelerator()
Destructor 
 
__declspec(property(get=get_description)) std _AMPIMP bool get_is_debug() const 
Returns a boolean value indicating whether the accelerator was created with DEBUG layer enabled for e...
 
_AMPIMP bool operator!=(const _Event &_Other) const 
Return false if the other _Event is same as this _Event; true otherwise 
 
unsigned int * _M_grid_extents
Definition: amprt.h:3538
 
_T _Least_common_multiple(_T _M, _T _N)
Definition: amprt.h:2681
 
friend _Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
 
void _Set_buffer_unaccessed(size_t _Buffer_idx)
Definition: amprt.h:3606
 
size_t _Get_preferred_copy_chunk_num_elems(size_t _Total_num_elems, size_t _Elem_size)
Definition: amprt.h:2612
 
_AMPIMP void _Write_end_event(ULONG _Span_id)
 
unsigned int _Get_rank() const 
Definition: amprt.h:1591
 
#define _Ret_
Definition: sal.h:996
 
_Texture_descriptor(const _Texture_descriptor &_Other, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __GPU
Definition: amprt.h:618
 
_Access_mode _Get_allowed_host_access_mode() const 
Definition: amprt.h:2056
 
concurrency::completion_future _Start_async_op_wait_event_helper(ULONG _Async_op_id, _Event _Ev)
Definition: amprt.h:3753
 
bool * _M_projection_info
Definition: amprt.h:1879
 
access_type
Enumeration type used to denote the various types of access to data. 
Definition: amprt.h:103
 
void * _M_data_ptr
Definition: amprt.h:2131
 
size_t _M_master_buffer_elem_size
Definition: amprt.h:3347
 
_AMPIMP void __cdecl _Register_async_event(const _Event &_Ev, const std::shared_future< void > &_Shared_future)
 
_AMPIMP bool _Is_finished_nothrow()
Poll whether the _Event has completed or not. Swallows any exceptions 
 
_Ret_ _Accelerator_view_impl * _Get_master_accelerator_view_impl() const 
Definition: amprt.h:3231
 
completion_future(const std::shared_future< void > &_Shared_future, const concurrency::task< void > &_Task)
Definition: amprt.h:1429