STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
Public Types | Public Member Functions | Protected Member Functions | Protected Attributes | Private Member Functions | Static Private Member Functions | Friends | List of all members
Concurrency::details::_Array_view_base< _Rank, _Element_size > Class Template Reference

#include <amp.h>

Inheritance diagram for Concurrency::details::_Array_view_base< _Rank, _Element_size >:
Concurrency::details::_Array_view_shape< _Rank, _Element_size >

Public Types

typedef details::_Buffer_descriptor _Buffer_descriptor
 

Public Member Functions

 ~_Array_view_base () __GPU
 
- Public Member Functions inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
__declspec(property(get=get_extent)) Concurrency::extent< _Rank > get_extent () const __GPU
 The extent of this array or view. More...
 
 ~_Array_view_shape () __GPU
 

Protected Member Functions

 _Array_view_base () __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const _Array_view_shape &_Shape) __GPU
 
 _Array_view_base (const _Array_view_base &_Other) __GPU
 
 _Array_view_base (const _Array_view_base &_Other, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Array_view_base &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_base (const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
 
 _Array_view_base (const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
 
_Array_view_base & operator= (const _Array_view_base &_Other) __GPU
 
_Ret_ void * _Access (const index< _Rank > &_Index) const __GPU
 
_Ret_ void * _Access (_Access_mode _Requested_mode, const index< _Rank > &_Index) const __CPU_ONLY
 
_Ret_ void * _Access (_Access_mode _Requested_mode, const index< _Rank > &_Index) const __GPU_ONLY
 
_Array_view_base _Section (const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
 
_Array_view_base _Section (const index< _Rank > &_Idx) const __GPU
 
void _Project0 (int _I, _Array_view_base< _Rank-1, _Element_size > &_Projected_view) const __GPU
 
template<int _New_element_size>
_Array_view_base< _Rank, _New_element_size > _Reinterpret_as () const __GPU
 
template<int _New_rank>
_Array_view_base< _New_rank, _Element_size > _View_as (const Concurrency::extent< _New_rank > &_View_extent) const __GPU
 
_Ret_ _View_shape * _Create_buffer_view_shape () const __CPU_ONLY
 
- Protected Member Functions inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
int _Base_linear_offset () const __GPU
 
 _Array_view_shape (const _Array_view_shape &_Other) __GPU
 
 _Array_view_shape (const _Array_view_shape &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_shape (int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_shape (int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
_Array_view_shape & operator= (const _Array_view_shape &_Other) __GPU
 
void _Project0 (int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
 
 _Array_view_shape () __GPU
 

Protected Attributes

_Buffer_descriptor _M_buffer_descriptor
 
- Protected Attributes inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
Concurrency::extent< _Rank > _M_array_extent
 
Concurrency::extent< _Rank > _M_array_multiplier
 
Concurrency::index< _Rank > _M_view_offset
 
int _M_total_linear_offset
 
Concurrency::extent< _Rank > _M_view_extent
 

Private Member Functions

void _Register () __CPU_ONLY
 
void _Register_copy (const _Array_view_base &_Other) __CPU_ONLY
 
void _Register (_In_ void *_Shape) __CPU_ONLY
 
void _Unregister (bool _Throw_exception=true) __CPU_ONLY
 
void _Register () __GPU_ONLY
 
void _Register_copy (const _Array_view_base &_Other) __GPU_ONLY
 
void _Register (_In_ void *_Shape) __GPU_ONLY
 
void _Unregister (bool _Throw_exception=true) __GPU_ONLY
 

Static Private Member Functions

static _Ret_ void * _Create_projection_buffer_shape (const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
 
static _Ret_ void * _Create_section_buffer_shape (const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __CPU_ONLY
 
static _Ret_ void * _Create_projection_buffer_shape (const _Buffer_descriptor &_Descriptor, int _Dim, int _I) __GPU_ONLY
 
static _Ret_ void * _Create_section_buffer_shape (const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU_ONLY
 

Friends

template<int _R, int _S>
class _Array_view_base
 

Member Typedef Documentation

template<int _Rank, int _Element_size>
typedef details::_Buffer_descriptor Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Buffer_descriptor

Constructor & Destructor Documentation

template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::~_Array_view_base ( )
inline
1661  {
1662  // Unregister the view; Do not throw exception
1663  _Unregister(false);
1664  }
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2009
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( )
inlineprotected
1668 {}
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const _Array_view_shape &  _Shape 
)
inlineprotected
1671  :
1672  _M_buffer_descriptor(_Buffer_desc),
1674  {
1675  // Register the view
1676  _Register();
1677  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inlineprotected
1680  :
1683  {
1684  // Register the view
1685  _Register_copy(_Other);
1686  }
void _Register_copy(const _Array_view_base &_Other) __CPU_ONLY
Definition: amp.h:1979
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1689  :
1692  {
1693  // Register the view
1694  _Register();
1695  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
int _Base_linear_offset() const __GPU
Definition: amp.h:1546
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlineprotected
1698  :
1700  _Array_view_shape<_Rank, _Element_size>(_Other, _Section_origin, _Section_extent)
1701  {
1702  // Register the view
1703  _Register();
1704  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1707  :
1708  _M_buffer_descriptor(_Buffer_desc),
1710  {
1711  // Register the view
1712  _Register();
1713  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
int  _Base_linear_offset,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1716  :
1717  _M_buffer_descriptor(_Buffer_desc),
1719  {
1720  // Register the view
1721  _Register();
1722  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
int _Base_linear_offset() const __GPU
Definition: amp.h:1546
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
int  _Base_linear_offset,
const Concurrency::extent< _Rank > &  _Array_extent,
const Concurrency::index< _Rank > &  _View_offset,
const Concurrency::extent< _Rank > &  _View_extent 
)
inlineprotected
1731  :
1732  _M_buffer_descriptor(_Buffer_desc),
1733  _Array_view_shape<_Rank, _Element_size>(_Base_linear_offset,_Array_extent,_View_offset,_View_extent)
1734  {
1735  // Register the view
1736  _Register();
1737  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
int _Base_linear_offset() const __GPU
Definition: amp.h:1546
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const Concurrency::extent< _Rank > &  _Array_extent,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlineprotected
1741  :
1742  _M_buffer_descriptor(_Buffer_desc),
1743  _Array_view_shape<_Rank, _Element_size>(0,_Array_extent,_Section_origin,_Section_extent)
1744  {
1745  // Register the view
1746  _Register();
1747  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const Concurrency::extent< _Rank > &  _Array_extent)
inlineprotected
1750  :
1752  {
1753  _Ubiquitous_buffer_ptr _PUBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Array_extent.size(), _Element_size * sizeof(int));
1755 
1756  // Register the view
1757  _Register();
1758  }
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1658
void _Register() __CPU_ONLY
Definition: amp.h:1961
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:305
Definition: amprt.h:84
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( _In_ void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1761  :
1763  {
1764  if (_Data == NULL) {
1765  throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1766  }
1767 
1768  _Buffer_ptr _PBuf = _Buffer::_Create_buffer(_Data, accelerator(accelerator::cpu_accelerator).default_view, _Array_extent.size(), _Element_size * sizeof(int));
1771 
1772  // Register the view
1773  _Register();
1774  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1658
void _Register() __CPU_ONLY
Definition: amp.h:1961
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30
static _AMPIMP _Ret_ _Buffer *__cdecl _Create_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view, size_t _Num_elems, size_t _Elem_size, bool _Is_temp=false, access_type _Cpu_access_type=access_type_auto)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1111
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:305
Definition: amprt.h:88
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( _In_ void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1777  :
1779  {
1780  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30
Definition: amprt.h:88
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1783  :
1785  {
1786  if (_Data == NULL) {
1787  throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1788  }
1789 
1790  _Buffer_ptr _PBuf = _Buffer::_Create_buffer(const_cast<void*>(_Data), accelerator(accelerator::cpu_accelerator).default_view, _Array_extent.size(), _Element_size * sizeof(int));
1792  _M_buffer_descriptor = _Buffer_descriptor(const_cast<void*>(_Data), _PUBuf, _Read_access, _Read_access);
1793 
1794  // Register the view
1795  _Register();
1796  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1658
void _Register() __CPU_ONLY
Definition: amp.h:1961
Definition: amprt.h:85
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30
static _AMPIMP _Ret_ _Buffer *__cdecl _Create_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view, size_t _Num_elems, size_t _Elem_size, bool _Is_temp=false, access_type _Cpu_access_type=access_type_auto)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1111
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:305
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1799  :
1800 #pragma warning( push )
1801 #pragma warning( disable : 4880 )
1802  // Casting away constness in amp restricted scope might result in
1803  // undefined behavior, therefore, the compiler will report a level 1 warning
1804  // for it. But the following const_cast is harmless thus we are suppressing
1805  // this warning just for this line.
1806  _Array_view_shape<_Rank, _Element_size>(0,_Array_extent), _M_buffer_descriptor(const_cast<void*>(_Data), NULL, _Read_access, _Read_access)
1807 #pragma warning( pop )
1808  {
1809  }
Definition: amprt.h:85
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30

Member Function Documentation

template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( const index< _Rank > &  _Index) const
inlineprotected
1829  {
1830  int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
1831  return &_Ptr[_M_total_linear_offset + (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _Index._M_base))];
1832  }
Concurrency::extent< _Rank > _M_array_multiplier
Definition: amp.h:1644
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
void * _M_data_ptr
Definition: amprt.h:432
int _M_total_linear_offset
Definition: amp.h:1646
static _T2 func(const _T1 *_Multiplier, const _T2 *_Index) __GPU
Definition: xxamp.h:925
template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( _Access_mode  _Requested_mode,
const index< _Rank > &  _Index 
) const
inlineprotected
1835  {
1836  // Refresh the data ptr if we do not have requested access
1837  if ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode) {
1838  _M_buffer_descriptor._Get_CPU_access(_Requested_mode);
1839  }
1840 
1841  return _Access(_Index);
1842  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:444
_AMPIMP void _Get_CPU_access(_Access_mode _Requested_mode) const
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1828
template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( _Access_mode  _Requested_mode,
const index< _Rank > &  _Index 
) const
inlineprotected
1845  {
1846  UNREFERENCED_PARAMETER(_Requested_mode);
1847 
1848  return _Access(_Index);
1849  }
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1828
template<int _Rank, int _Element_size>
_Ret_ _View_shape* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_buffer_view_shape ( ) const
inlineprotected
1897  {
1898  unsigned int bufElemSize = static_cast<unsigned int>(_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
1899  unsigned int elemSize = _Element_size * sizeof(int);
1900 
1901  size_t linearOffsetInBytes = _Base_linear_offset() * sizeof(int);
1902 
1903  size_t baseLSDExtentInBytes = _M_array_extent[_Rank - 1];
1904  baseLSDExtentInBytes *= elemSize;
1905 
1906  size_t viewLSDOffsetInBytes = _M_view_offset[_Rank - 1];
1907  viewLSDOffsetInBytes *= elemSize;
1908 
1909  size_t viewLSDExtentInBytes = _M_view_extent[_Rank - 1];
1910  viewLSDExtentInBytes *= elemSize;
1911 
1912  // The base array extent, view extent, and view offset must be compatible with the underlying
1913  // buffer's element size
1914  if (((linearOffsetInBytes % bufElemSize) != 0) ||
1915  ((baseLSDExtentInBytes % bufElemSize) != 0) ||
1916  ((viewLSDOffsetInBytes % bufElemSize) != 0) ||
1917  ((viewLSDExtentInBytes % bufElemSize) != 0))
1918  {
1919  throw runtime_exception("The array_view base extent, view offset and/or view extent is incompatible with the underlying buffer", E_FAIL);
1920  }
1921 
1922  // The shape to be passed to the underlying buffer for registration must be in terms of
1923  // the element size of the buffer
1924  _ASSERTE((linearOffsetInBytes / bufElemSize) <= UINT_MAX);
1925  unsigned int linearOffset = static_cast<unsigned int>(linearOffsetInBytes / bufElemSize);
1926 
1927  unsigned int baseExtent[_Rank];
1928  unsigned int viewOffset[_Rank];
1929  unsigned int viewExtent[_Rank];
1930 #pragma warning( push )
1931 #pragma warning( disable : 6294 )
1932 #pragma warning( disable : 6201 ) // Index '-1' is out of valid index range '0' to '0' for possibly stack allocated buffer 'baseExtent'.
1933  for (int i = 0; i < _Rank - 1; ++i) {
1934  baseExtent[i] = _M_array_extent[i];
1935  viewOffset[i] = _M_view_offset[i];
1936  viewExtent[i] = _M_view_extent[i];
1937  }
1938 #pragma warning( pop )
1939 
1940  // The extent in the least significant dimension needs to be adjusted for
1941  // difference in element size between the buffer and ourselves
1942  _ASSERTE((baseLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1943  baseExtent[_Rank - 1] = static_cast<unsigned int>(baseLSDExtentInBytes / bufElemSize);
1944 
1945  _ASSERTE((viewLSDOffsetInBytes / bufElemSize) <= UINT_MAX);
1946  viewOffset[_Rank - 1] = static_cast<unsigned int>(viewLSDOffsetInBytes / bufElemSize);
1947 
1948  _ASSERTE((viewLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1949  viewExtent[_Rank - 1] = static_cast<unsigned int>(viewLSDExtentInBytes / bufElemSize);
1950 
1951  return _View_shape::_Create_view_shape(_Rank, linearOffset, baseExtent, viewOffset, viewExtent);
1952  }
size_t _Get_master_buffer_elem_size() const
Definition: amprt.h:3310
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define UINT_MAX
Definition: limits.h:41
int i[4]
Definition: dvec.h:70
#define _ASSERTE(expr)
Definition: crtdbg.h:216
int _Base_linear_offset() const __GPU
Definition: amp.h:1546
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1647
Concurrency::extent< _Rank > _M_array_extent
Definition: amp.h:1643
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
Concurrency::index< _Rank > _M_view_offset
Definition: amp.h:1645
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_projection_buffer_shape ( const _Buffer_descriptor _Descriptor,
unsigned int  _Dim,
int  _Dim_offset 
)
inlinestaticprivate
2028  {
2029  _View_shape* _Base_shape = _Get_buffer_view_shape(_Descriptor);
2030 
2031  std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2032  std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2033  bool *_New_projection_info = new bool[_Base_shape->_Get_rank()];
2034  for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2035  {
2036  _New_view_extent[_I] = _Base_shape->_Get_view_extent()[_I];
2037  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I];
2038  _New_projection_info[_I] = _Base_shape->_Get_projection_info()[_I];
2039  }
2040 
2041  // The _Dim'th non-projected dimension needs to be found
2042  unsigned int _UnProjectedDimCount = 0;
2043  for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2044  {
2045  if (_Base_shape->_Get_projection_info()[_I]) {
2046  continue;
2047  }
2048 
2049  if (_UnProjectedDimCount == _Dim) {
2050  _New_view_extent[_I] = 1;
2051  _New_view_offset[_I] += _Dim_offset;
2052  _New_projection_info[_I] = true;
2053  break;
2054  }
2055  else {
2056  _UnProjectedDimCount++;
2057  }
2058  }
2059 
2060  auto _PView_shape = _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2061  _Base_shape->_Get_linear_offset(),
2062  _Base_shape->_Get_base_extent(),
2063  _New_view_offset.data(),
2064  _New_view_extent.data(),
2065  _New_projection_info);
2066 
2067  delete [] _New_projection_info;
2068 
2069  return _PView_shape;
2070  }
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1677
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1682
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
const bool * _Get_projection_info() const
Definition: amprt.h:1691
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1686
unsigned int _Get_linear_offset() const
Definition: amprt.h:1672
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3463
Definition: amprt.h:1657
unsigned int _Get_rank() const
Definition: amprt.h:1667
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_projection_buffer_shape ( const _Buffer_descriptor _Descriptor,
int  _Dim,
int  _I 
)
inlinestaticprivate
2161  {
2162  UNREFERENCED_PARAMETER(_Descriptor);
2163  UNREFERENCED_PARAMETER(_Dim);
2164  UNREFERENCED_PARAMETER(_I);
2165 
2166  return NULL;
2167  }
#define NULL
Definition: crtdbg.h:30
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_section_buffer_shape ( const _Buffer_descriptor _Descriptor,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlinestaticprivate
2074  {
2075  _View_shape* _Base_shape = _Get_buffer_view_shape(_Descriptor);
2076  if (_Base_shape->_Get_rank() == _Rank) {
2077  return NULL;
2078  }
2079 
2080  std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2081  std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2082  unsigned int _I = 0, _J = 0;
2083  while (_I < _Base_shape->_Get_rank())
2084  {
2085  if (_Base_shape->_Get_projection_info()[_I])
2086  {
2087  _New_view_extent[_I] = _Base_shape->_Get_view_extent()[_I];
2088  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I];
2089  }
2090  else
2091  {
2092  // If _J is the least significant dimension, then we need to adjust the
2093  // offset and extent for the underlying buffer's element size
2094  if (_J == (_Rank - 1))
2095  {
2096  unsigned int bufElemSize = static_cast<unsigned int>(_Descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
2097  unsigned int elemSize = _Element_size * sizeof(int);
2098 
2099  size_t sectionLSDOriginInBytes = _Section_origin[_J];
2100  sectionLSDOriginInBytes *= elemSize;
2101 
2102  size_t sectionLSDExtentInBytes = _Section_extent[_J];
2103  sectionLSDExtentInBytes *= elemSize;
2104 
2105  // The section offset and extent must be compatible with the underlying
2106  // buffer's element size
2107  if (((sectionLSDOriginInBytes % bufElemSize) != 0) ||
2108  ((sectionLSDExtentInBytes % bufElemSize) != 0))
2109  {
2110  throw runtime_exception("The array_view section origin and/or extent is incompatible with the underlying buffer", E_FAIL);
2111  }
2112 
2113  // The extent in the least significant dimension needs to be adjusted for
2114  // difference in element size between the buffer and ourselves
2115  _ASSERTE((sectionLSDOriginInBytes / bufElemSize) <= UINT_MAX);
2116  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + static_cast<unsigned int>(sectionLSDOriginInBytes / bufElemSize);
2117 
2118  _ASSERTE((sectionLSDExtentInBytes / bufElemSize) <= UINT_MAX);
2119  _New_view_extent[_I] = static_cast<unsigned int>(sectionLSDExtentInBytes / bufElemSize);
2120  }
2121  else
2122  {
2123  _New_view_extent[_I] = _Section_extent[_J];
2124  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + _Section_origin[_J];
2125  }
2126 
2127  _J++;
2128  }
2129 
2130  _I++;
2131  }
2132 
2133  _ASSERTE(_J == _Rank);
2134 
2135  return _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2136  _Base_shape->_Get_linear_offset(),
2137  _Base_shape->_Get_base_extent(),
2138  _New_view_offset.data(),
2139  _New_view_extent.data(),
2140  _Base_shape->_Get_projection_info());
2141  }
size_t _Get_master_buffer_elem_size() const
Definition: amprt.h:3310
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1677
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1682
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
const bool * _Get_projection_info() const
Definition: amprt.h:1691
#define NULL
Definition: crtdbg.h:30
#define UINT_MAX
Definition: limits.h:41
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1686
#define _ASSERTE(expr)
Definition: crtdbg.h:216
unsigned int _Get_linear_offset() const
Definition: amprt.h:1672
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3463
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
Definition: amprt.h:1657
unsigned int _Get_rank() const
Definition: amprt.h:1667
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_section_buffer_shape ( const _Buffer_descriptor _Descriptor,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlinestaticprivate
2170  {
2171  UNREFERENCED_PARAMETER(_Descriptor);
2172  UNREFERENCED_PARAMETER(_Section_origin);
2173  UNREFERENCED_PARAMETER(_Section_extent);
2174 
2175  return NULL;
2176  }
#define NULL
Definition: crtdbg.h:30
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Project0 ( int  _I,
_Array_view_base< _Rank-1, _Element_size > &  _Projected_view 
) const
inlineprotected
1867  {
1868  _Projected_view._M_buffer_descriptor = this->_M_buffer_descriptor;
1870 
1871  // Register the constructed view with the projection buffer view shape
1873  }
void _Register() __CPU_ONLY
Definition: amp.h:1961
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
void _Project0(int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
Definition: amp.h:1608
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
Definition: amp.h:2027
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( )
inlineprivate
1962  {
1964  accelerator(accelerator::cpu_accelerator).default_view,
1966 
1968  {
1969  _Buffer_ptr _PBuf;
1971  accelerator(accelerator::cpu_accelerator).default_view,
1973  _PBuf)._Get();
1974 
1975  _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
1976  }
1977  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3457
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape)
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
_Ret_ _View_shape * _Create_buffer_view_shape() const __CPU_ONLY
Definition: amp.h:1896
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1111
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:444
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
void * _M_data_ptr
Definition: amprt.h:432
Definition: amprt.h:84
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( _In_ void *  _Shape)
inline, private
1985  {
1986  if (_Shape == NULL) {
1987  return;
1988  }
1989 
1990  // Unregister and register with the right shape
1991  _Unregister();
1992 
1994  accelerator(accelerator::cpu_accelerator).default_view,
1995  reinterpret_cast<_View_shape*>(_Shape));
1996 
1998  {
1999  _Buffer_ptr _PBuf;
2001  accelerator(accelerator::cpu_accelerator).default_view,
2003  _PBuf)._Get();
2004 
2005  _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
2006  }
2007  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3457
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape)
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
#define NULL
Definition: crtdbg.h:30
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1111
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:444
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2009
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
void * _M_data_ptr
Definition: amprt.h:432
Definition: amprt.h:84
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
Definition: amprt.h:1657
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( )
inline, private
2143 {}
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( _In_ void *  _Shape)
inline, private
2151  {
2152  UNREFERENCED_PARAMETER(_Shape);
2153  }
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register_copy ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inline, private
1980  {
1982  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
_AMPIMP void _Register_view_copy(_In_ _View_key _New_view_key, _In_ _View_key _Existing_view_key)
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register_copy ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inline, private
2146  {
2147  UNREFERENCED_PARAMETER(_Other);
2148  }
template<int _Rank, int _Element_size>
template<int _New_element_size>
_Array_view_base<_Rank,_New_element_size> Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Reinterpret_as ( ) const
inline, protected
1877  {
1878  static_assert(_Rank==1, "reinterpret_as is only permissible on array views of rank 1");
1879  int _New_size = _Calculate_reinterpreted_size<_Element_size,_New_element_size>(_M_view_extent.size());
1882  Concurrency::extent<_Rank>(_New_size));
1883  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1647
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
int _M_total_linear_offset
Definition: amp.h:1646
template<int _Rank, int _Element_size>
_Array_view_base Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Section ( const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
) const
inline, protected
1852  {
1853  auto _View = _Array_view_base(*this, _Section_origin, _Section_extent);
1854 
1855  // Register the constructed view with the section buffer view shape
1856  _View._Register(_Array_view_base::_Create_section_buffer_shape(this->_M_buffer_descriptor, _Section_origin, _Section_extent));
1857 
1858  return _View;
1859  }
static _Ret_ void * _Create_section_buffer_shape(const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __CPU_ONLY
Definition: amp.h:2072
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
_Array_view_base() __GPU
Definition: amp.h:1668
template<int _Rank, int _Element_size>
_Array_view_base Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Section ( const index< _Rank > &  _Idx) const
inline, protected
1862  {
1863  return _Section(_Idx, this->extent - _Idx);
1864  }
_Array_view_base _Section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Definition: amp.h:1851
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Unregister ( bool  _Throw_exception = true)
inline, private
2010  {
2011  if (!_Throw_exception && (std::current_exception() == nullptr)) {
2012  _Throw_exception = true;
2013  }
2014 
2015  try
2016  {
2018  }
2019  catch(...)
2020  {
2021  if (_Throw_exception) {
2022  throw;
2023  }
2024  }
2025  }
exception_ptr current_exception()
Definition: exception:527
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
_AMPIMP void _Unregister_view(_In_ _View_key _Key)
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Unregister ( bool  _Throw_exception = true)
inline, private
2156  {
2157  UNREFERENCED_PARAMETER(_Throw_exception);
2158  }
template<int _Rank, int _Element_size>
template<int _New_rank>
_Array_view_base<_New_rank, _Element_size> Concurrency::details::_Array_view_base< _Rank, _Element_size >::_View_as ( const Concurrency::extent< _New_rank > &  _View_extent) const
inline, protected
1887  {
1888  static_assert(_Rank==1, "view_as is only permissible on array views of rank 1");
1891  _View_extent,
1892  index<_New_rank>(),
1893  _View_extent);
1894  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
int _M_total_linear_offset
Definition: amp.h:1646
template<int _Rank, int _Element_size>
_Array_view_base& Concurrency::details::_Array_view_base< _Rank, _Element_size >::operator= ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inline, protected
1812  {
1813  if (this != &_Other)
1814  {
1815  // Unregister the current view
1816  _Unregister();
1817 
1820 
1821  // Register the new view
1822  _Register_copy(_Other);
1823  }
1824 
1825  return *this;
1826  }
void _Register_copy(const _Array_view_base &_Other) __CPU_ONLY
Definition: amp.h:1979
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2009
_Array_view_shape & operator=(const _Array_view_shape &_Other) __GPU
Definition: amp.h:1598

Friends And Related Function Documentation

template<int _Rank, int _Element_size>
template<int _R, int _S>
friend class _Array_view_base
friend

Member Data Documentation

template<int _Rank, int _Element_size>
_Buffer_descriptor Concurrency::details::_Array_view_base< _Rank, _Element_size >::_M_buffer_descriptor
protected

The documentation for this class was generated from the following file: