STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
Public Types | Public Member Functions | Protected Member Functions | Protected Attributes | Private Types | Private Member Functions | Static Private Member Functions | Friends | List of all members
Concurrency::details::_Array_view_base< _Rank, _Element_size > Class Template Reference

#include <amp.h>

Inheritance diagram for Concurrency::details::_Array_view_base< _Rank, _Element_size >:
Concurrency::details::_Array_view_shape< _Rank, _Element_size >

Public Types

typedef details::_Buffer_descriptor _Buffer_descriptor
 

Public Member Functions

 ~_Array_view_base () __GPU
 
- Public Member Functions inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
__declspec(property(get=get_extent)) Concurrency::extent< _Rank > get_extent () const __GPU
 The extent of this array or view. More...
 
 ~_Array_view_shape () __GPU
 

Protected Member Functions

 _Array_view_base () __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const _Array_view_shape< _Rank, _Element_size > &_Shape) __GPU
 
 _Array_view_base (const _Array_view_base &_Other) __GPU
 
 _Array_view_base (const _Array_view_base &_Other, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Array_view_base &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __CPU_ONLY
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __GPU_ONLY
 
 _Array_view_base (const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_base (const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
 
 _Array_view_base (const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
 
 _Array_view_base (const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
 
_Array_view_base & operator= (const _Array_view_base &_Other) __GPU
 
_Ret_ void * _Access (const index< _Rank > &_Index) const __GPU
 
_Ret_ void * _Access (_Access_mode _Requested_mode, const index< _Rank > &_Index) const __CPU_ONLY
 
_Ret_ void * _Access (_Access_mode, const index< _Rank > &_Index) const __GPU_ONLY
 
_Array_view_base _Section (const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
 
_Array_view_base _Section (const index< _Rank > &_Idx) const __GPU
 
void _Project0 (int _I, _Array_view_base< _Rank-1, _Element_size > &_Projected_view) const __GPU
 
template<int _New_element_size>
_Array_view_base< _Rank, _New_element_size > _Reinterpret_as () const __GPU
 
template<int _New_rank>
_Array_view_base< _New_rank, _Element_size > _View_as (const Concurrency::extent< _New_rank > &_View_extent) const __GPU
 
_Ret_ _View_shape * _Create_buffer_view_shape () const __CPU_ONLY
 
- Protected Member Functions inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
int _Base_linear_offset () const __GPU
 
 _Array_view_shape (const _Array_view_shape &_Other) __GPU
 
 _Array_view_shape (const _Array_view_shape &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
 _Array_view_shape (int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
 
 _Array_view_shape (int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
 
_Array_view_shape & operator= (const _Array_view_shape &_Other) __GPU
 
void _Project0 (int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
 
 _Array_view_shape () __GPU
 

Protected Attributes

_Buffer_descriptor _M_buffer_descriptor
 
- Protected Attributes inherited from Concurrency::details::_Array_view_shape< _Rank, _Element_size >
Concurrency::extent< _Rank > _M_array_extent
 
Concurrency::extent< _Rank > _M_array_multiplier
 
Concurrency::index< _Rank > _M_view_offset
 
int _M_total_linear_offset
 
Concurrency::extent< _Rank > _M_view_extent
 

Private Types

typedef _Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
 

Private Member Functions

void _Register (_In_opt_ const _View_key _Source_view_key=nullptr) __CPU_ONLY
 
void _Register_copy (const _Array_view_base &_Other) __CPU_ONLY
 
void _Register (_In_ void *_Shape) __CPU_ONLY
 
void _Unregister (bool _Throw_exception=true) __CPU_ONLY
 
void _Register () __GPU_ONLY
 
void _Register_copy (const _Array_view_base &) __GPU_ONLY
 
void _Register (_In_ void *) __GPU_ONLY
 
void _Unregister (bool=true) __GPU_ONLY
 

Static Private Member Functions

static _Ret_ void * _Create_projection_buffer_shape (const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
 
static _Ret_ void * _Create_section_buffer_shape (const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __CPU_ONLY
 
static _Ret_ void * _Create_projection_buffer_shape (const _Buffer_descriptor &, int, int) __GPU_ONLY
 
static _Ret_ void * _Create_section_buffer_shape (const _Buffer_descriptor &, const Concurrency::index< _Rank > &, const Concurrency::extent< _Rank > &) __GPU_ONLY
 

Friends

template<int _R, int _S>
class _Array_view_base
 

Member Typedef Documentation

template<int _Rank, int _Element_size>
typedef details::_Buffer_descriptor Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Buffer_descriptor
template<int _Rank, int _Element_size>
typedef _Array_flatten_helper<_Rank, typename Concurrency::extent<_Rank>::value_type, typename Concurrency::index<_Rank>::value_type> Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Flatten_helper
private

Constructor & Destructor Documentation

template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::~_Array_view_base ( )
inline
1688  {
1689  // Unregister the view; Do not throw exception
1690  _Unregister(false);
1691  }
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2056
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( )
inlineprotected
1695 {}
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const _Array_view_shape< _Rank, _Element_size > &  _Shape 
)
inlineprotected
1698  :
1699  _M_buffer_descriptor(_Buffer_desc),
1701  {
1702  // Register the view
1703  _Register();
1704  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inlineprotected
1707  :
1710  {
1711  // Register the view
1712  _Register_copy(_Other);
1713 
1714  // update this buffer descriptor in case _Register_copy was late and missed the update opportunity.
1716  }
void _Register_copy(const _Array_view_base &_Other) __CPU_ONLY
Definition: amp.h:2026
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1719  :
1722  {
1723  // Register the view
1724  _Register();
1725  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Array_view_base< _Rank, _Element_size > &  _Other,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlineprotected
1728  :
1730  _Array_view_shape<_Rank, _Element_size>(_Other, _Section_origin, _Section_extent)
1731  {
1732  // Register the view
1733  _Register();
1734  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1737  :
1738  _M_buffer_descriptor(_Buffer_desc),
1740  {
1741  // Register the view
1742  _Register();
1743  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
int  _Base_linear_offset,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1746  :
1747  _M_buffer_descriptor(_Buffer_desc),
1749  {
1750  // Register the view
1751  _Register();
1752  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
int  _Base_linear_offset,
const Concurrency::extent< _Rank > &  _Array_extent,
const Concurrency::index< _Rank > &  _View_offset,
const Concurrency::extent< _Rank > &  _View_extent 
)
inlineprotected
1761  :
1762  _M_buffer_descriptor(_Buffer_desc),
1763  _Array_view_shape<_Rank, _Element_size>(_Base_linear_offset,_Array_extent,_View_offset,_View_extent)
1764  {
1765  // Register the view
1766  _Register(_Buffer_desc._Get_view_key());
1767  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
int  _Base_linear_offset,
const Concurrency::extent< _Rank > &  _Array_extent,
const Concurrency::index< _Rank > &  _View_offset,
const Concurrency::extent< _Rank > &  _View_extent 
)
inlineprotected
1776  :
1777  _M_buffer_descriptor(_Buffer_desc),
1778  _Array_view_shape<_Rank, _Element_size>(_Base_linear_offset,_Array_extent,_View_offset,_View_extent)
1779  {
1780  // Register the view
1781  _Register();
1782  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const _Buffer_descriptor &  _Buffer_desc,
const Concurrency::extent< _Rank > &  _Array_extent,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlineprotected
1786  :
1787  _M_buffer_descriptor(_Buffer_desc),
1788  _Array_view_shape<_Rank, _Element_size>(0,_Array_extent,_Section_origin,_Section_extent)
1789  {
1790  // Register the view
1791  _Register();
1792  }
void _Register() __GPU_ONLY
Definition: amp.h:2190
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const Concurrency::extent< _Rank > &  _Array_extent)
inlineprotected
1795  :
1797  {
1798  _Ubiquitous_buffer_ptr _PUBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Array_extent.size(), _Element_size * sizeof(int));
1800 
1801  // Register the view
1802  _Register();
1803  }
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1685
void _Register() __GPU_ONLY
Definition: amp.h:2190
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:311
typedef int (__CRTDECL *_CRT_REPORT_HOOKW)(int, wchar_t *, int *)
Definition: crtdbg.h:45
Definition: amprt.h:90
#define NULL
Definition: corecrt.h:158
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( _In_ void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1806  :
1808  {
1809  if (_Data == NULL) {
1810  throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1811  }
1812 
1813  _Buffer_ptr _PBuf = _Buffer::_Create_buffer(_Data, accelerator(accelerator::cpu_accelerator).default_view, _Array_extent.size(), _Element_size * sizeof(int));
1816 
1817  // Register the view
1818  _Register();
1819  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:308
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1685
void _Register() __GPU_ONLY
Definition: amp.h:2190
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
static _AMPIMP _Ret_ _Buffer *__cdecl _Create_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view, size_t _Num_elems, size_t _Elem_size, bool _Is_temp=false, access_type _Cpu_access_type=access_type_auto)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1035
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:311
typedef int (__CRTDECL *_CRT_REPORT_HOOKW)(int, wchar_t *, int *)
Definition: crtdbg.h:45
Definition: amprt.h:94
#define NULL
Definition: corecrt.h:158
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( _In_ void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1822  :
1824  {
1825  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
Definition: amprt.h:94
#define NULL
Definition: corecrt.h:158
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1828  :
1830  {
1831  if (_Data == NULL) {
1832  throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1833  }
1834 
1835  _Buffer_ptr _PBuf = _Buffer::_Create_buffer(const_cast<void*>(_Data), accelerator(accelerator::cpu_accelerator).default_view, _Array_extent.size(), _Element_size * sizeof(int));
1837  _M_buffer_descriptor = _Buffer_descriptor(const_cast<void*>(_Data), _PUBuf, _Read_access, _Read_access);
1838 
1839  // Register the view
1840  _Register();
1841  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:308
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1685
Definition: amprt.h:91
void _Register() __GPU_ONLY
Definition: amp.h:2190
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
static _AMPIMP _Ret_ _Buffer *__cdecl _Create_buffer(accelerator_view _Accelerator_view, accelerator_view _Access_on_accelerator_view, size_t _Num_elems, size_t _Elem_size, bool _Is_temp=false, access_type _Cpu_access_type=access_type_auto)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1035
details::_Reference_counted_obj_ptr< details::_Ubiquitous_buffer > _Ubiquitous_buffer_ptr
Definition: amprt.h:311
typedef int (__CRTDECL *_CRT_REPORT_HOOKW)(int, wchar_t *, int *)
Definition: crtdbg.h:45
#define NULL
Definition: corecrt.h:158
static _AMPIMP _Ret_ _Ubiquitous_buffer *__cdecl _Create_ubiquitous_buffer(size_t _Num_elems, size_t _Elem_size)
template<int _Rank, int _Element_size>
Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Array_view_base ( const void *  _Data,
const Concurrency::extent< _Rank > &  _Array_extent 
)
inlineprotected
1844  :
1845 #pragma warning( push )
1846 #pragma warning( disable : 4880 )
1847  // Casting away constness in amp restricted scope might result in
1848  // undefined behavior, therefore, the compiler will report a level 1 warning
1849  // for it. But the following const_cast is harmless thus we are suppressing
1850  // this warning just for this line.
1851  _Array_view_shape<_Rank, _Element_size>(0,_Array_extent), _M_buffer_descriptor(const_cast<void*>(_Data), NULL, _Read_access, _Read_access)
1852 #pragma warning( pop )
1853  {
1854  }
Definition: amprt.h:91
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
#define NULL
Definition: corecrt.h:158

Member Function Documentation

template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( const index< _Rank > &  _Index) const
inlineprotected
1877  {
1878  int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
1879  return &_Ptr[this->_M_total_linear_offset + (_Element_size * _Flatten_helper::func(this->_M_array_multiplier._M_base, _Index._M_base))];
1880  }
Concurrency::extent< _Rank > _M_array_multiplier
Definition: amp.h:1669
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_In_ size_t _In_ int _Index
Definition: time.h:102
void * _M_data_ptr
Definition: amprt.h:438
int _M_total_linear_offset
Definition: amp.h:1671
static _T2 func(const _T1 *_Multiplier, const _T2 *_Index) __GPU
Definition: xxamp.h:924
template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( _Access_mode  _Requested_mode,
const index< _Rank > &  _Index 
) const
inlineprotected
1883  {
1884  // Refresh the data ptr if we do not have requested access
1885  if ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode) {
1886  _M_buffer_descriptor._Get_CPU_access(_Requested_mode);
1887  }
1888 
1889  return _Access(_Index);
1890  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_In_ size_t _In_ int _Index
Definition: time.h:102
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:450
_AMPIMP void _Get_CPU_access(_Access_mode _Requested_mode) const
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1876
template<int _Rank, int _Element_size>
_Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Access ( _Access_mode  ,
const index< _Rank > &  _Index 
) const
inlineprotected
1893  {
1894  return _Access(_Index);
1895  }
_In_ size_t _In_ int _Index
Definition: time.h:102
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1876
template<int _Rank, int _Element_size>
_Ret_ _View_shape* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_buffer_view_shape ( ) const
inlineprotected
1943  {
1944  unsigned int bufElemSize = static_cast<unsigned int>(_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
1945  unsigned int elemSize = _Element_size * sizeof(int);
1946 
1947  size_t linearOffsetInBytes = this->_Base_linear_offset() * sizeof(int);
1948 
1949  size_t baseLSDExtentInBytes = this->_M_array_extent[_Rank - 1];
1950  baseLSDExtentInBytes *= elemSize;
1951 
1952  size_t viewLSDOffsetInBytes = this->_M_view_offset[_Rank - 1];
1953  viewLSDOffsetInBytes *= elemSize;
1954 
1955  size_t viewLSDExtentInBytes = this->_M_view_extent[_Rank - 1];
1956  viewLSDExtentInBytes *= elemSize;
1957 
1958  // The base array extent, view extent, and view offset must be compatible with the underlying
1959  // buffer's element size
1960  if (((linearOffsetInBytes % bufElemSize) != 0) ||
1961  ((baseLSDExtentInBytes % bufElemSize) != 0) ||
1962  ((viewLSDOffsetInBytes % bufElemSize) != 0) ||
1963  ((viewLSDExtentInBytes % bufElemSize) != 0))
1964  {
1965  throw runtime_exception("The array_view base extent, view offset and/or view extent is incompatible with the underlying buffer", E_FAIL);
1966  }
1967 
1968  // The shape to be passed to the underlying buffer for registration must be in terms of
1969  // the element size of the buffer
1970  _ASSERTE((linearOffsetInBytes / bufElemSize) <= UINT_MAX);
1971  unsigned int linearOffset = static_cast<unsigned int>(linearOffsetInBytes / bufElemSize);
1972 
1973  unsigned int baseExtent[_Rank];
1974  unsigned int viewOffset[_Rank];
1975  unsigned int viewExtent[_Rank];
1976 #pragma warning( push )
1977 #pragma warning( disable : 6294 )
1978 #pragma warning( disable : 6201 ) // Index '-1' is out of valid index range '0' to '0' for possibly stack allocated buffer 'baseExtent'.
1979  for (int i = 0; i < _Rank - 1; ++i) {
1980  baseExtent[i] = this->_M_array_extent[i];
1981  viewOffset[i] = this->_M_view_offset[i];
1982  viewExtent[i] = this->_M_view_extent[i];
1983  }
1984 #pragma warning( pop )
1985 
1986  // The extent in the least significant dimension needs to be adjusted for
1987  // difference in element size between the buffer and ourselves
1988  _ASSERTE((baseLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1989  baseExtent[_Rank - 1] = static_cast<unsigned int>(baseLSDExtentInBytes / bufElemSize);
1990 
1991  _ASSERTE((viewLSDOffsetInBytes / bufElemSize) <= UINT_MAX);
1992  viewOffset[_Rank - 1] = static_cast<unsigned int>(viewLSDOffsetInBytes / bufElemSize);
1993 
1994  _ASSERTE((viewLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1995  viewExtent[_Rank - 1] = static_cast<unsigned int>(viewLSDExtentInBytes / bufElemSize);
1996 
1997  return _View_shape::_Create_view_shape(_Rank, linearOffset, baseExtent, viewOffset, viewExtent);
1998  }
size_t _Get_master_buffer_elem_size() const
Definition: amprt.h:3236
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
#define UINT_MAX
Definition: limits.h:36
int i[4]
Definition: dvec.h:68
#define _ASSERTE(expr)
Definition: crtdbg.h:707
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1672
typedef int (__CRTDECL *_CRT_REPORT_HOOKW)(int, wchar_t *, int *)
Definition: crtdbg.h:45
Concurrency::extent< _Rank > _M_array_extent
Definition: amp.h:1668
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
Concurrency::index< _Rank > _M_view_offset
Definition: amp.h:1670
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_projection_buffer_shape ( const _Buffer_descriptor &  _Descriptor,
unsigned int  _Dim,
int  _Dim_offset 
)
inlinestaticprivate
2075  {
2076  _View_shape* _Base_shape = _Get_buffer_view_shape(_Descriptor);
2077 
2078  std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2079  std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2080  bool *_New_projection_info = new bool[_Base_shape->_Get_rank()];
2081  for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2082  {
2083  _New_view_extent[_I] = _Base_shape->_Get_view_extent()[_I];
2084  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I];
2085  _New_projection_info[_I] = _Base_shape->_Get_projection_info()[_I];
2086  }
2087 
2088  // The _Dim'th non-projected dimension needs to be found
2089  unsigned int _UnProjectedDimCount = 0;
2090  for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2091  {
2092  if (_Base_shape->_Get_projection_info()[_I]) {
2093  continue;
2094  }
2095 
2096  if (_UnProjectedDimCount == _Dim) {
2097  _New_view_extent[_I] = 1;
2098  _New_view_offset[_I] += _Dim_offset;
2099  _New_projection_info[_I] = true;
2100  break;
2101  }
2102  else {
2103  _UnProjectedDimCount++;
2104  }
2105  }
2106 
2107  auto _PView_shape = _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2108  _Base_shape->_Get_linear_offset(),
2109  _Base_shape->_Get_base_extent(),
2110  _New_view_offset.data(),
2111  _New_view_extent.data(),
2112  _New_projection_info);
2113 
2114  delete [] _New_projection_info;
2115 
2116  return _PView_shape;
2117  }
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1601
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1606
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
const bool * _Get_projection_info() const
Definition: amprt.h:1615
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1610
unsigned int _Get_linear_offset() const
Definition: amprt.h:1596
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3395
Definition: amprt.h:1581
unsigned int _Get_rank() const
Definition: amprt.h:1591
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_projection_buffer_shape ( const _Buffer_descriptor & ,
int  ,
int   
)
inlinestaticprivate
2205  {
2206  return NULL;
2207  }
#define NULL
Definition: corecrt.h:158
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_section_buffer_shape ( const _Buffer_descriptor &  _Descriptor,
const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
)
inlinestaticprivate
2121  {
2122  _View_shape* _Base_shape = _Get_buffer_view_shape(_Descriptor);
2123  if (_Base_shape->_Get_rank() == _Rank) {
2124  return NULL;
2125  }
2126 
2127  std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2128  std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2129  unsigned int _I = 0, _J = 0;
2130  while (_I < _Base_shape->_Get_rank())
2131  {
2132  if (_Base_shape->_Get_projection_info()[_I])
2133  {
2134  _New_view_extent[_I] = _Base_shape->_Get_view_extent()[_I];
2135  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I];
2136  }
2137  else
2138  {
2139  // If _J is the least significant dimension, then we need to adjust the
2140  // offset and extent for the underlying buffer's element size
2141  if (_J == (_Rank - 1))
2142  {
2143  unsigned int bufElemSize = static_cast<unsigned int>(_Descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
2144  unsigned int elemSize = _Element_size * sizeof(int);
2145 
2146  size_t sectionLSDOriginInBytes = _Section_origin[_J];
2147  sectionLSDOriginInBytes *= elemSize;
2148 
2149  size_t sectionLSDExtentInBytes = _Section_extent[_J];
2150  sectionLSDExtentInBytes *= elemSize;
2151 
2152  // The section offset and extent must be compatible with the underlying
2153  // buffer's element size
2154  if (((sectionLSDOriginInBytes % bufElemSize) != 0) ||
2155  ((sectionLSDExtentInBytes % bufElemSize) != 0))
2156  {
2157  throw runtime_exception("The array_view section origin and/or extent is incompatible with the underlying buffer", E_FAIL);
2158  }
2159 
2160  // The extent in the least significant dimension needs to be adjusted for
2161  // difference in element size between the buffer and ourselves
2162  _ASSERTE((sectionLSDOriginInBytes / bufElemSize) <= UINT_MAX);
2163  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + static_cast<unsigned int>(sectionLSDOriginInBytes / bufElemSize);
2164 
2165  _ASSERTE((sectionLSDExtentInBytes / bufElemSize) <= UINT_MAX);
2166  _New_view_extent[_I] = static_cast<unsigned int>(sectionLSDExtentInBytes / bufElemSize);
2167  }
2168  else
2169  {
2170  _New_view_extent[_I] = _Section_extent[_J];
2171  _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + _Section_origin[_J];
2172  }
2173 
2174  _J++;
2175  }
2176 
2177  _I++;
2178  }
2179 
2180  _ASSERTE(_J == _Rank);
2181 
2182  return _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2183  _Base_shape->_Get_linear_offset(),
2184  _Base_shape->_Get_base_extent(),
2185  _New_view_offset.data(),
2186  _New_view_extent.data(),
2187  _Base_shape->_Get_projection_info());
2188  }
size_t _Get_master_buffer_elem_size() const
Definition: amprt.h:3236
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1601
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1606
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
const bool * _Get_projection_info() const
Definition: amprt.h:1615
#define UINT_MAX
Definition: limits.h:36
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1610
#define _ASSERTE(expr)
Definition: crtdbg.h:707
unsigned int _Get_linear_offset() const
Definition: amprt.h:1596
typedef int (__CRTDECL *_CRT_REPORT_HOOKW)(int, wchar_t *, int *)
Definition: crtdbg.h:45
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3395
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
Definition: amprt.h:1581
unsigned int _Get_rank() const
Definition: amprt.h:1591
#define NULL
Definition: corecrt.h:158
template<int _Rank, int _Element_size>
static _Ret_ void* Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Create_section_buffer_shape ( const _Buffer_descriptor & ,
const Concurrency::index< _Rank > &  ,
const Concurrency::extent< _Rank > &   
)
inlinestaticprivate
2210  {
2211  return NULL;
2212  }
#define NULL
Definition: corecrt.h:158
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Project0 ( int  _I,
_Array_view_base< _Rank-1, _Element_size > &  _Projected_view 
) const
inlineprotected
1913  {
1914  _Projected_view._M_buffer_descriptor = this->_M_buffer_descriptor;
1916 
1917  // Register the constructed view with the projection buffer view shape
1919  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
void _Register(_In_opt_ const _View_key _Source_view_key=nullptr) __CPU_ONLY
Definition: amp.h:2007
void _Project0(int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
Definition: amp.h:1633
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
Definition: amp.h:2074
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( _In_opt_ const _View_key  _Source_view_key = nullptr)
inlineprivate
2008  {
2010  accelerator(accelerator::cpu_accelerator).default_view,
2012  _Source_view_key);
2013 
2015  {
2016  _Buffer_ptr _PBuf;
2018  accelerator(accelerator::cpu_accelerator).default_view,
2020  _PBuf)._Get();
2021 
2022  _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
2023  }
2024  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:308
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_Ret_ _View_shape * _Create_buffer_view_shape() const __CPU_ONLY
Definition: amp.h:1942
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape, _In_opt_ const _View_key _Source_view_key=nullptr)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1035
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:450
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
void * _M_data_ptr
Definition: amprt.h:438
Definition: amprt.h:90
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( _In_ void *  _Shape)
inlineprivate
2032  {
2033  if (_Shape == NULL) {
2034  return;
2035  }
2036 
2037  // Unregister and register with the right shape
2038  _Unregister();
2039 
2041  accelerator(accelerator::cpu_accelerator).default_view,
2042  reinterpret_cast<_View_shape*>(_Shape));
2043 
2045  {
2046  _Buffer_ptr _PBuf;
2048  accelerator(accelerator::cpu_accelerator).default_view,
2050  _PBuf)._Get();
2051 
2052  _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
2053  }
2054  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:308
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape, _In_opt_ const _View_key _Source_view_key=nullptr)
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1035
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:450
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2056
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
void * _M_data_ptr
Definition: amprt.h:438
Definition: amprt.h:90
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
Definition: amprt.h:1581
#define NULL
Definition: corecrt.h:158
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( )
inlineprivate
2190 {}
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register ( _In_ void *  )
inlineprivate
2197  {
2198  }
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register_copy ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inlineprivate
2027  {
2029  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_AMPIMP void _Register_view_copy(_In_ _View_key _New_view_key, _In_ _View_key _Existing_view_key)
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Register_copy ( const _Array_view_base< _Rank, _Element_size > &  )
inlineprivate
2193  {
2194  }
template<int _Rank, int _Element_size>
template<int _New_element_size>
_Array_view_base<_Rank,_New_element_size> Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Reinterpret_as ( ) const
inlineprotected
1923  {
1924  static_assert(_Rank==1, "reinterpret_as is only permissible on array views of rank 1");
1925  int _New_size = _Calculate_reinterpreted_size<_Element_size,_New_element_size>(this->_M_view_extent.size());
1927  this->_M_total_linear_offset,
1928  Concurrency::extent<_Rank>(_New_size));
1929  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1672
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
int _M_total_linear_offset
Definition: amp.h:1671
template<int _Rank, int _Element_size>
_Array_view_base Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Section ( const Concurrency::index< _Rank > &  _Section_origin,
const Concurrency::extent< _Rank > &  _Section_extent 
) const
inlineprotected
1898  {
1899  auto _View = _Array_view_base(*this, _Section_origin, _Section_extent);
1900 
1901  // Register the constructed view with the section buffer view shape
1902  _View._Register(_Array_view_base::_Create_section_buffer_shape(this->_M_buffer_descriptor, _Section_origin, _Section_extent));
1903 
1904  return _View;
1905  }
static _Ret_ void * _Create_section_buffer_shape(const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __CPU_ONLY
Definition: amp.h:2119
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_Array_view_base() __GPU
Definition: amp.h:1695
template<int _Rank, int _Element_size>
_Array_view_base Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Section ( const index< _Rank > &  _Idx) const
inlineprotected
1908  {
1909  return _Section(_Idx, this->extent - _Idx);
1910  }
Definition: type_traits:1045
_Array_view_base _Section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Definition: amp.h:1897
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Unregister ( bool  _Throw_exception = true)
inlineprivate
2057  {
2058  if (!_Throw_exception && (std::current_exception() == nullptr)) {
2059  _Throw_exception = true;
2060  }
2061 
2062  try
2063  {
2065  }
2066  catch(...)
2067  {
2068  if (_Throw_exception) {
2069  throw;
2070  }
2071  }
2072  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
_AMPIMP void _Unregister_view(_In_ _View_key _Key)
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
exception_ptr current_exception() _NOEXCEPT
Definition: exception:366
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
template<int _Rank, int _Element_size>
void Concurrency::details::_Array_view_base< _Rank, _Element_size >::_Unregister ( bool  = true)
inlineprivate
2201  {
2202  }
template<int _Rank, int _Element_size>
template<int _New_rank>
_Array_view_base<_New_rank, _Element_size> Concurrency::details::_Array_view_base< _Rank, _Element_size >::_View_as ( const Concurrency::extent< _New_rank > &  _View_extent) const
inlineprotected
1933  {
1934  static_assert(_Rank==1, "view_as is only permissible on array views of rank 1");
1936  this->_M_total_linear_offset,
1937  _View_extent,
1938  index<_New_rank>(),
1939  _View_extent);
1940  }
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
int _M_total_linear_offset
Definition: amp.h:1671
template<int _Rank, int _Element_size>
_Array_view_base& Concurrency::details::_Array_view_base< _Rank, _Element_size >::operator= ( const _Array_view_base< _Rank, _Element_size > &  _Other)
inlineprotected
1857  {
1858  if (this != &_Other)
1859  {
1860  // Unregister the current view
1861  _Unregister();
1862 
1865 
1866  // Register the new view
1867  _Register_copy(_Other);
1868 
1869  // update this buffer descriptor in case _Register_copy was late and missed the update opportunity.
1871  }
1872 
1873  return *this;
1874  }
void _Register_copy(const _Array_view_base &_Other) __CPU_ONLY
Definition: amp.h:2026
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2056
_Array_view_shape & operator=(const _Array_view_shape &_Other) __GPU
Definition: amp.h:1623

Friends And Related Function Documentation

template<int _Rank, int _Element_size>
template<int _R, int _S>
friend class _Array_view_base
friend

Member Data Documentation

template<int _Rank, int _Element_size>
_Buffer_descriptor Concurrency::details::_Array_view_base< _Rank, _Element_size >::_M_buffer_descriptor
protected

The documentation for this class was generated from the following file: