Concurrency::details Namespace Reference

Namespaces

 platform
 
 std
 

Classes

class  _Accelerator_view_hasher
 
class  _Aggregated_operation
 _Aggregated_operation base class More...
 
class  _Aggregator
 An aggregator for collecting operations coming from multiple sources and executing them serially on a single thread. _Operation_type must be derived from _Aggregated_operation. The parameter _Handler_type is a functor that will be passed the list of operations and is expected to handle each operation appropriately, setting the status of each operation to non-zero. More...
 
class  _Allocator_base
 
struct  _AllocBase
 
class  _Amp_runtime_trace
 
class  _AnonymousOriginator
 
struct  _arithmetic_op_helper
 
struct  _arithmetic_op_helper< _T, opAdd >
 
struct  _arithmetic_op_helper< _T, opDiv >
 
struct  _arithmetic_op_helper< _T, opMod >
 
struct  _arithmetic_op_helper< _T, opMul >
 
struct  _arithmetic_op_helper< _T, opSub >
 
struct  _arithmetic_op_loop_helper
 
struct  _arithmetic_op_loop_helper< _T, _Kind, 1, false >
 
struct  _arithmetic_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _arithmetic_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _Array_copy_desc
 
struct  _Array_flatten_helper
 
struct  _Array_flatten_helper< 1, _T1, _T2 >
 
struct  _Array_flatten_helper< 2, _T1, _T2 >
 
struct  _Array_flatten_helper< 3, _T1, _T2 >
 
struct  _Array_init_helper
 
struct  _Array_init_helper< _T1, _T2, 1 >
 
struct  _Array_init_helper< _T1, _T2, 2 >
 
struct  _Array_init_helper< _T1, _T2, 3 >
 
struct  _Array_init_helper< _T1, _T2, 4 >
 
class  _Array_projection_helper
 
class  _Array_projection_helper< _T, 1 >
 
class  _Array_view_base
 
class  _Array_view_projection_helper
 
class  _Array_view_projection_helper< _T, 1 >
 
class  _Array_view_shape
 
class  _AsyncOriginator
 
class  _AsyncTaskCollection
 An async task collection is a thin wrapper over a task collection, catering to the execution of asynchronous chores (tasks defined in ppltasks.h). Specifically, it manages its own lifetime by using reference counts: scheduling a chore acquires a reference, and completion of the chore's execution releases it. More...
 
struct  _AutoDeleter
 
struct  _BadArgType
 
struct  _BadContinuationParamType
 
struct  _Beacon_reference
 Internal maintenance structure for beacons. More...
 
class  _Buffer
 
struct  _Buffer_descriptor
 
class  _Cancellation_beacon
 A cancellation beacon is a flag that can be polled in an inlinable fashion using the is_signaled method, in lieu of polling the more expensive, non-inlinable is_current_task_group_canceling method. More...
 
class  _CancellationTokenCallback
 
class  _CancellationTokenRegistration
 
class  _CancellationTokenState
 
struct  _Chore
 
struct  _cmp_op_helper
 
struct  _cmp_op_helper< _T, opEq >
 
struct  _cmp_op_helper< _T, opNeq >
 
struct  _cmp_op_loop_helper
 
struct  _cmp_op_loop_helper< _T, _Kind, 1, false >
 
struct  _cmp_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _cmp_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _compound_assign_op_helper
 
struct  _compound_assign_op_helper< _T, opAddEq >
 
struct  _compound_assign_op_helper< _T, opAssign >
 
struct  _compound_assign_op_helper< _T, opDivEq >
 
struct  _compound_assign_op_helper< _T, opModEq >
 
struct  _compound_assign_op_helper< _T, opMulEq >
 
struct  _compound_assign_op_helper< _T, opSubEq >
 
struct  _compound_assign_op_loop_helper
 
struct  _compound_assign_op_loop_helper< _T, _Kind, 1, false >
 
struct  _compound_assign_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _compound_assign_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 1, false >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 2, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 3, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 4, true >
 
struct  _CONCRT_TRACE_INFO
 
class  _Concurrent_hash
 
class  _Concurrent_queue_base_v4
 
class  _Concurrent_queue_iterator
 
class  _Concurrent_queue_iterator_base_v4
 
class  _Concurrent_queue_rep
 
class  _Concurrent_unordered_map_traits
 
class  _Concurrent_unordered_set_traits
 
class  _Concurrent_vector_base_v4
 
class  _Condition_variable
 A _Condition_variable which is explicitly aware of the Concurrency Runtime. More...
 
class  _Const_array_projection_helper
 
class  _Const_array_projection_helper< _T, 1 >
 
class  _Const_array_view_projection_helper
 
class  _Const_array_view_projection_helper< _T, 1 >
 
struct  _contains
 
struct  _contains< _EXT, _IDX, 1 >
 
struct  _contains< _EXT, _IDX, 2 >
 
struct  _contains< _EXT, _IDX, 3 >
 
class  _Context
 
class  _ContextCallback
 
struct  _ContinuationTaskHandleBase
 
struct  _ContinuationTypeTraits
 
class  _CurrentScheduler
 
class  _D3D_interop
 
struct  _DPC_call_handle
 
class  _Dynamic_array
 
class  _Event
 
struct  _ExceptionHolder
 
struct  _Falsifier
 
struct  _FunctionTypeTraits
 
struct  _FunctionTypeTraits< _Function, void >
 
class  _Hash_compare
 
struct  _Host_Scheduling_info
 
struct  _index_helper
 
struct  _InitFunctorTypeTraits
 
struct  _InitFunctorTypeTraits< T, T >
 
struct  _Internal_task_options
 
class  _Interruption_exception
 
struct  _Is_container
 
struct  _IsIAsyncInfo
 
struct  _IsUnwrappedAsyncSelector
 
struct  _IsUnwrappedAsyncSelector< _TypeSelectorNoAsync >
 
class  _MallocaArrayHolder
 
class  _MallocaListHolder
 
struct  _map_index
 
struct  _map_index< _T1, 1 >
 
struct  _map_index< _T1, 2 >
 
struct  _map_index< _T1, 3 >
 
struct  _map_index< _T1, 4 >
 
struct  _Micro_queue
 
class  _NonReentrantBlockingLock
 
class  _NonReentrantPPLLock
 
struct  _NormalizeVoidToUnitType
 
struct  _NormalizeVoidToUnitType< void >
 
class  _Originator
 
struct  _PPLTaskHandle
 The _PPLTaskHandle is the strongly typed task handle base. All user task functions need to be wrapped in this task handler to be executable by PPL. By deriving from a different _BaseTaskHandle, it can be used for both initial tasks and continuation tasks. For initial tasks, _PPLTaskHandle is derived from _UnrealizedChore_t, and for continuation tasks, it is derived from _ContinuationTaskHandleBase. The lifetime of the _PPLTaskHandle object is managed by the runtime if the task handle is scheduled. More...
 
struct  _product_helper
 
struct  _product_helper< _T, 1, false >
 
struct  _product_helper< _T, _Rank, false >
 
struct  _product_helper< _T, _Rank, true >
 
struct  _project0
 
struct  _project0< _RES_EXT, _SRC_EXT, _RES_IDX, _SRC_IDX, 2 >
 
struct  _project0< _RES_EXT, _SRC_EXT, _RES_IDX, _SRC_IDX, 3 >
 
class  _Projection_result_type
 
class  _Projection_result_type< _T, 1 >
 
class  _Queue
 
class  _ReaderWriterLock
 
class  _ReentrantBlockingLock
 
class  _ReentrantLock
 
class  _ReentrantPPLLock
 
class  _RefCounter
 
class  _RefCounterBase
 
class  _Reference_counted_obj_ptr
 
class  _Reference_counter
 
struct  _ResultHolder
 
class  _Runtime_object
 
class  _Sampler
 
struct  _Sampler_descriptor
 
class  _Scheduler
 
class  _Solist_const_iterator
 
class  _Solist_iterator
 
struct  _SpinCount
 
class  _SpinLock
 
class  _SpinWait
 Implements busy wait with no backoff More...
 
class  _Split_order_list_node
 
class  _Split_order_list_value
 
class  _Split_ordered_list
 
class  _StackGuard
 RAII wrapper used to maintain and limit the maximum inline scheduling depth of ppltasks. This class keeps a reference to the depth slot on the current context. More...
 
class  _StructuredTaskCollection
 Structured task collections represent groups of work that follow a strictly LIFO-ordered paradigm for queuing and waiting. They can only be waited on once and can only be used from a single thread of execution. More...
 
class  _Subatomic
 
struct  _Subatomic_impl
 
struct  _Subatomic_impl< 4 >
 
class  _SyncOriginator
 
struct  _Task_completion_event_impl
 
struct  _Task_generator_oversubscriber
 
struct  _Task_impl
 The implementation of a first-class task. This structure contains the task group used to execute the task function and handles the scheduling. The _Task_impl is created as a shared_ptr member of the public task class, so its destruction is handled automatically. More...
 
struct  _Task_impl_base
 The base implementation of a first-class task. This class contains all the non-type specific implementation details of the task. More...
 
struct  _Task_ptr
 
class  _TaskCollection
 Task collections represent groups of work which step outside the strict structuring of the _StructuredTaskCollection definition. Any group of work that does not follow LIFO ordering, is waited on multiple times, or is passed between arbitrary threads requires this definition of a task collection. It has additional overhead over _StructuredTaskCollection. More...
 
class  _TaskCollectionBase
 
class  _TaskCollectionImpl
 
class  _TaskCreationCallstack
 Callstack container, which is used to capture and preserve callstacks in ppltasks. Members of this class are examined by the VC debugger, so there are no public access methods. Note that the names in this class should be kept stable for debugger examination. More...
 
struct  _TaskEventLogger
 
struct  _TaskProcHandle
 
struct  _TaskProcThunk
 Helper object used for LWT invocation. More...
 
struct  _TaskTypeFromParam
 
struct  _TaskTypeTraits
 
struct  _TaskTypeTraits< void >
 
struct  _TaskWorkItemRAIILogger
 
class  _Texture
 
struct  _Texture_descriptor
 
struct  _Texture_predefined_sample_helper
 
struct  _Texture_predefined_sample_helper< _T, 1 >
 
struct  _Texture_predefined_sample_helper< _T, 2 >
 
struct  _Texture_predefined_sample_helper< _T, 3 >
 
struct  _Texture_read_helper
 
struct  _Texture_read_helper< _T, 1 >
 
struct  _Texture_read_helper< _T, 2 >
 
struct  _Texture_read_helper< _T, 3 >
 
struct  _Texture_sample_helper
 
struct  _Texture_sample_helper< _T, 1 >
 
struct  _Texture_sample_helper< _T, 2 >
 
struct  _Texture_sample_helper< _T, 3 >
 
struct  _Texture_write_helper
 
struct  _Texture_write_helper< _T, 1 >
 
struct  _Texture_write_helper< _T, 2 >
 
struct  _Texture_write_helper< _T, 3 >
 
class  _Timer
 
struct  _TypeSelectorAsyncAction
 
struct  _TypeSelectorAsyncActionWithProgress
 
struct  _TypeSelectorAsyncOperation
 
struct  _TypeSelectorAsyncOperationOrTask
 
struct  _TypeSelectorAsyncOperationWithProgress
 
struct  _TypeSelectorAsyncTask
 
struct  _TypeSelectorNoAsync
 
class  _Ubiquitous_buffer
 
class  _UnrealizedChore
 
struct  _UnwrapTaskType
 
struct  _UnwrapTaskType< task< _Ty > >
 
class  _Vector_iterator
 
class  _View_shape
 
class  CancellationTokenRegistration_TaskProc
 

Typedefs

typedef enum _Short_vector_base_type_id _Texture_base_type_id
 
typedef _Buffer_descriptor * _View_key
 
typedef struct Concurrency::details::_Buffer_descriptor _Buffer_descriptor
 
typedef struct Concurrency::details::_Texture_descriptor _Texture_descriptor
 
typedef struct Concurrency::details::_Sampler_descriptor _Sampler_descriptor
 
typedef std::unordered_set< accelerator_view, _Accelerator_view_hasher > _Accelerator_view_unordered_set
 
typedef _SpinWait<> _SpinWaitBackoffNone
 
typedef _SpinWait< 0 > _SpinWaitNoYield
 
typedef void(__cdecl * _UnobservedExceptionHandler) (void)
 
typedef size_t _Ticket
 
typedef _Concurrent_queue_base_v4 _Concurrent_queue_base
 
typedef _Concurrent_queue_iterator_base_v4 concurrent_queue_iterator_base
 
typedef _Concurrent_vector_base_v4 _Concurrent_vector_base
 
typedef size_t _Map_key
 
typedef _Map_key _Split_order_key
 
typedef ::Concurrency::details::_TaskCollectionImpl _TaskCollection_t
 
typedef ::Concurrency::details::_TaskInliningMode _TaskInliningMode_t
 
typedef ::Concurrency::details::_Task_generator_oversubscriber _Task_generator_oversubscriber_t
 
typedef std::atomic< long > atomic_long
 Atomics More...
 
typedef std::atomic< size_t > atomic_size_t
 
typedef unsigned char _Unit_type
 
typedef _TaskCollection_t::_TaskProcHandle_t _UnrealizedChore_t
 
typedef std::shared_ptr< _Task_impl_base > _Task_ptr_base
 

Enumerations

enum  _Short_vector_base_type_id : unsigned int {
  _Uint_type = 0, _Int_type = 1, _Float_type = 2, _Unorm_type = 3,
  _Norm_type = 4, _Double_type = 5, _Invalid_type = 0xFFFFFFFF
}
 
enum  _DPC_kernel_func_kind { NON_ALIASED_SHADER = 0, ALIASED_SHADER = 1, NUM_SHADER_VERSIONS = 2 }
 
enum  _TaskCollectionStatus { _NotComplete, _Completed, _Canceled }
 
enum  _TaskInliningMode { _NoInline = 0, _DefaultAutoInline = 16, _ForceInline = -1 }
 The enum defines the inlining scheduling policy for ppltasks. Scheduling a chore or a functor with a _TaskInliningMode gives the scheduler a hint on whether or not to apply inline execution. More...
 
enum  _eInitializeState { _do_not_initialize }
 
enum  _op_kind {
  opEq, opNeq, opNot, opAssign,
  opAddEq, opSubEq, opMulEq, opDivEq,
  opModEq, opAdd, opSub, opMul,
  opDiv, opMod
}
 

Functions

template<class _Type >
__int64 _Trace_agents_get_id (_Type *_PObject)
 
template<int _Old_element_size, int _New_element_size>
int _Calculate_reinterpreted_size (int _Old_size) __GPU_ONLY
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
 
template<typename InputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (InputIterator _SrcFirst, InputIterator _SrcLast, array< _Value_type, _Rank > &_Dest)
 
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< const _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< const _Value_type, _Rank > &_Src, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename InputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (InputIterator _SrcFirst, InputIterator _SrcLast, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
 
_Ret_ _Accelerator_view_impl * _Get_accelerator_view_impl_ptr (const accelerator_view &_Accl_view)
 
_Ret_ _Accelerator_impl * _Get_accelerator_impl_ptr (const accelerator &_Accl)
 
_Event _Get_access_async (const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
 
unsigned int _Get_mipmap_levels (const _Texture *_Tex)
 
bool _Is_valid_access_mode (_Access_mode _Mode)
 
_AMPIMP size_t __cdecl _Get_num_devices ()
 
_AMPIMP _Ret_ _Accelerator_impl_ptr *__cdecl _Get_devices ()
 
_AMPIMP accelerator __cdecl _Select_default_accelerator ()
 
_AMPIMP bool __cdecl _Set_default_accelerator (_Accelerator_impl_ptr _Accl)
 
_AMPIMP bool __cdecl _Is_D3D_accelerator_view (const accelerator_view &_Av)
 
_AMPIMP void __cdecl _Register_async_event (const _Event &_Ev, const std::shared_future< void > &_Shared_future)
 
_AMPIMP _Access_mode __cdecl _Get_recommended_buffer_host_access_mode (const accelerator_view &_Av)
 
_Ret_ _View_shape * _Create_reinterpreted_shape (const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
 
_Access_mode _Get_synchronize_access_mode (access_type cpu_access_type)
 
access_type _Get_cpu_access_type (_Access_mode _Cpu_access_mode)
 
_AMPIMP _Event __cdecl _Copy_impl (_In_ _Buffer *_Src, size_t _Src_offset, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Num_elems, size_t _Preferred_copy_chunk_num_elems=0)
 
_AMPIMP _Event __cdecl _Copy_async_impl (_In_ _Texture *_Src_tex, const size_t *_Src_offset, unsigned int _Src_mipmap_level, _Out_ _Texture *_Dst_tex, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level, const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent=NULL)
 
bool _Get_chunked_staging_texture (_In_ _Texture *_Tex, const size_t *_Copy_chunk_extent, _Inout_ size_t *_Remaining_copy_extent, _Out_ size_t *_Curr_copy_extent, _Out_ _Texture_ptr *_Staging_texture)
 
template<typename _Input_iterator , typename _Value_type >
void _Copy_data_on_host (int _Rank, _Input_iterator _Src, _Out_ _Value_type *_Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Dst_row_pitch_in_bytes, size_t _Dst_depth_pitch_in_bytes, size_t _Src_row_pitch, size_t _Src_depth_pitch)
 
template<typename _Output_iterator , typename _Value_type >
void _Copy_data_on_host (int _Rank, const _Value_type *_Src, _Output_iterator _Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Src_row_pitch_in_bytes, size_t _Src_depth_pitch_in_bytes, size_t _Dst_row_pitch, size_t _Dst_depth_pitch)
 
_AMPIMP size_t __cdecl _Get_preferred_copy_chunk_size (size_t _Total_copy_size_in_bytes)
 
size_t _Get_preferred_copy_chunk_num_elems (size_t _Total_num_elems, size_t _Elem_size)
 
void _Get_preferred_copy_chunk_extent (unsigned int _Rank, size_t _Width, size_t _Height, size_t _Depth, size_t _Bits_per_element, _Out_writes_(3) size_t *_Preferred_copy_chunk_extent)
 
template<typename _T >
_T _Greatest_common_divisor (_T _M, _T _N)
 
template<typename _T >
_T _Least_common_multiple (_T _M, _T _N)
 
template<typename InputIterator , typename _Value_type >
_Event _Copy_impl (InputIterator _SrcFirst, InputIterator _SrcLast, size_t _NumElemsToCopy, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Preferred_copy_chunk_num_elems=0)
 
template<typename _InputIterator , typename _Distance >
std::enable_if< std::is_base_of< std::input_iterator_tag, typename std::iterator_traits< _InputIterator >::iterator_category >::value >::type _Advance_output_iterator (_InputIterator &_Iter, _Distance _N)
 
template<typename _OutputIterator , typename _Distance >
std::enable_if<!std::is_base_of< std::input_iterator_tag, typename std::iterator_traits< _OutputIterator >::iterator_category >::value >::type _Advance_output_iterator (_OutputIterator &_Iter, size_t _N)
 
template<typename OutputIterator , typename _Value_type >
_Event _Copy_impl (_In_ _Buffer *_Src, size_t _Src_offset, size_t _Num_elems, OutputIterator _DestIter, size_t _Preferred_copy_chunk_num_elems=0)
 
_AMPIMP _Event __cdecl _Copy_impl (_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, _Out_ _Buffer *_Dst, _View_shape_ptr _Dst_shape)
 
_AMPIMP HRESULT __cdecl _Recursive_array_copy (const _Array_copy_desc &_Desc, unsigned int _Native_copy_rank, std::function< HRESULT(const _Array_copy_desc &_Reduced)> _Native_copy_func)
 
_AMPIMP std::pair< accelerator_view, accelerator_view > __cdecl _Get_src_dest_accelerator_view (_In_opt_ const _Buffer_descriptor *_SrcBuffDescPtr, _In_opt_ const _Buffer_descriptor *_DestBuffDescPtr)
 
template<typename _InputInterator , typename _OutputIterator >
_Event _Copy_impl_iter (_InputInterator _SrcFirst, _InputInterator _SrcLast, _OutputIterator _DstFirst)
 
template<typename InputIterator , typename _Value_type >
_Event _Copy_impl (InputIterator _SrcFirst, _View_shape_ptr _Src_shape, _Inout_ _Buffer *_Dst, _View_shape_ptr _Dst_shape)
 
template<typename OutputIterator , typename _Value_type >
_Event _Copy_impl (_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, OutputIterator _DestIter, _View_shape_ptr _Dst_shape)
 
template<typename _InputInterator , typename _OutputIterator >
_Event _Copy_impl_iter (_InputInterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
 
_Ret_ _View_shape * _Get_buffer_view_shape (const _Buffer_descriptor &_Descriptor)
 
bool _Is_cpu_accelerator (const accelerator &_Accl)
 
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace ()
 
template<class _T >
void _InternalDeleteHelper (_T *_PObject)
 
void _CRTIMP __cdecl _UnderlyingYield ()
 Default method for yielding during a spin wait More...
 
unsigned int _CRTIMP __cdecl _GetConcurrency ()
 Returns the hardware concurrency available to the Concurrency Runtime, taking into account process affinity, or any restrictions in place because of the set_task_execution_resources method. More...
 
_CRTIMP void __cdecl _SetUnobservedExceptionHandler (_UnobservedExceptionHandler)
 
_CRTIMP void __cdecl _ReportUnobservedException ()
 
_CRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo ()
 Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW provider. More...
 
void _RegisterConcRTEventTracing ()
 Register ConcRT as an ETW Event Provider. More...
 
void _UnregisterConcRTEventTracing ()
 Unregister ConcRT as an ETW Event Provider. More...
 
template<typename _C , typename _Ty , typename _U >
bool operator== (const _Concurrent_queue_iterator< _C, _Ty > &_I, const _Concurrent_queue_iterator< _C, _U > &_J)
 
template<typename _C , typename _Ty , typename _U >
bool operator!= (const _Concurrent_queue_iterator< _C, _Ty > &_I, const _Concurrent_queue_iterator< _C, _U > &_J)
 
template<typename _Container , typename _Ty >
_Vector_iterator< _Container, _Ty > operator+ (ptrdiff_t _Offset, const _Vector_iterator< _Container, _Ty > &_Vec)
 
template<typename _Container , typename _Ty , typename _U >
bool operator== (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator!= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator< (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator> (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator>= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator<= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
ptrdiff_t operator- (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
unsigned char _Reverse_byte (unsigned char _Original_byte)
 
unsigned char _Get_msb (size_t _Mask)
 
template<class _Mylist >
_Solist_const_iterator< _Mylist >::_Unchecked_type _Unchecked (_Solist_const_iterator< _Mylist > _Iterator)
 
template<class _Mylist >
_Solist_const_iterator< _Mylist > & _Rechecked (_Solist_const_iterator< _Mylist > &_Iterator, typename _Solist_const_iterator< _Mylist >::_Unchecked_type _Right)
 
template<class _Mylist >
_Solist_iterator< _Mylist >::_Unchecked_type _Unchecked (_Solist_iterator< _Mylist > _Iterator)
 
template<class _Mylist >
_Solist_iterator< _Mylist > & _Rechecked (_Solist_iterator< _Mylist > &_Iterator, typename _Solist_iterator< _Mylist >::_Unchecked_type _Right)
 
_CRTIMP2 size_t __cdecl _GetCombinableSize ()
 
template<typename _T >
_T atomic_compare_exchange (std::atomic< _T > &_Target, _T _Exchange, _T _Comparand)
 
template<typename _T >
_T atomic_exchange (std::atomic< _T > &_Target, _T _Value)
 
template<typename _T >
_T atomic_increment (std::atomic< _T > &_Target)
 
template<typename _T >
_T atomic_decrement (std::atomic< _T > &_Target)
 
template<typename _T >
_T atomic_add (std::atomic< _T > &_Target, _T value)
 
template<typename _T >
_TypeSelectorAsyncTask _AsyncOperationKindSelector (task< _T >)
 
_TypeSelectorNoAsync _AsyncOperationKindSelector (...)
 
template<typename _Function >
auto _IsCallable (_Function _Func, int) -> decltype(_Func(), std::true_type())
 
template<typename _Function >
std::false_type _IsCallable (_Function,...)
 
template<typename _Type >
task< _Type > _To_task (_Type t)
 
task< void > _To_task ()
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type t, _Function _Func, int, int) -> decltype(_Func(_To_task(t)))
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type t, _Function _Func, int,...) -> decltype(_Func(t))
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type t, _Function _Func,...) -> _BadContinuationParamType
 
template<typename _Function , typename _Type >
auto _IsTaskHelper (_Type t, _Function _Func, int, int) -> decltype(_Func(_To_task(t)), std::true_type())
 
template<typename _Function , typename _Type >
std::false_type _IsTaskHelper (_Type t, _Function _Func, int,...)
 
template<typename _Function >
auto _VoidReturnTypeHelper (_Function _Func, int, int) -> decltype(_Func(_To_task()))
 
template<typename _Function >
auto _VoidReturnTypeHelper (_Function _Func, int,...) -> decltype(_Func())
 
template<typename _Function >
auto _VoidIsTaskHelper (_Function _Func, int, int) -> decltype(_Func(_To_task()), std::true_type())
 
template<typename _Function >
std::false_type _VoidIsTaskHelper (_Function _Func, int,...)
 
static void _ScheduleFuncWithAutoInline (const std::function< void()> &_Func, _TaskInliningMode_t _InliningMode)
 Schedule a functor with automatic inlining. Note that this is "fire and forget" scheduling, which cannot be waited on or canceled after scheduling. This schedule method performs automatic inlining based on _InliningMode. More...
 
_Internal_task_options & _get_internal_task_options (task_options &options)
 
const _Internal_task_options & _get_internal_task_options (const task_options &options)
 
void _LogCancelTask (_Task_impl_base *)
 
std::function< _Unit_type(void)> _MakeVoidToUnitFunc (const std::function< void(void)> &_Func)
 
template<typename _Type >
std::function< _Type(_Unit_type)> _MakeUnitToTFunc (const std::function< _Type(void)> &_Func)
 
template<typename _Type >
std::function< _Unit_type(_Type)> _MakeTToUnitFunc (const std::function< void(_Type)> &_Func)
 
std::function< _Unit_type(_Unit_type)> _MakeUnitToUnitFunc (const std::function< void(void)> &_Func)
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int, int, int, int) -> decltype(_Param(), std::true_type())
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int, int,...) -> decltype(_Param.set(stdx::declval< _ReturnType >()), std::true_type())
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int,...) -> decltype(_Param.set(), std::true_type())
 
template<typename _ReturnType , typename _Ty >
std::false_type _IsValidTaskCtor (_Ty _Param,...)
 
template<typename _ReturnType , typename _Ty >
void _ValidateTaskConstructorArgs (_Ty _Param)
 
template<typename _Ty >
_Ty _GetUnwrappedType (task< _Ty >)
 The following type traits are used for the create_task function. More...
 
template<typename _Ty >
auto _GetUnwrappedReturnType (_Ty _Arg, int) -> decltype(_GetUnwrappedType(_Arg))
 
template<typename _Ty >
_Ty _GetUnwrappedReturnType (_Ty,...)
 
template<typename _Ty >
_Ty _GetTaskType (task_completion_event< _Ty >, std::false_type)
 The _GetTaskType functions retrieve the task type T in task<T>(Arg) for a given constructor argument Arg and its "callable" property. They automatically unwrap the argument to get the final return type if necessary. More...
 
template<typename _Ty >
auto _GetTaskType (_Ty _NonFunc, std::false_type) -> decltype(_GetUnwrappedType(_NonFunc))
 
template<typename _Ty >
auto _GetTaskType (_Ty _Func, std::true_type) -> decltype(_GetUnwrappedReturnType(_Func(), 0))
 
void _GetTaskType (std::function< void()>, std::true_type)
 
template<typename _Ty >
auto _FilterValidTaskType (_Ty _Param, int) -> decltype(_GetTaskType(_Param, _IsCallable(_Param, 0)))
 
template<typename _Ty >
_BadArgType _FilterValidTaskType (_Ty _Param,...)
 
template<class _Tuple_type >
_Tuple_type _Create_uninitialized_tuple () __GPU
 
template<typename _Array_type >
const _Buffer_descriptor & _Get_buffer_descriptor (const _Array_type &_Array) __GPU
 
template<typename _Array_type >
_Ret_ _Ubiquitous_buffer * _Get_buffer (const _Array_type &_Array) __CPU_ONLY
 
template<typename _Array_type >
_Event _Get_access_async (const _Array_type &_Array, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr) __CPU_ONLY
 
template<typename _Array_type >
void * _Get_datasource_identity (const _Array_type &_Array)
 
template<typename _Texture_type >
const _Texture_descriptor & _Get_texture_descriptor (const _Texture_type &_Tex) __GPU
 
template<typename _Texture_type >
_Ret_ _Texture * _Get_texture (const _Texture_type &_Tex) __CPU_ONLY
 
template<int _Rank, template< int > class _T1, template< int > class _T2>
static void _Is_valid_section (const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
 
template<int _Rank, template< int > class _T1>
static void _Is_valid_projection (int _I, const _T1< _Rank > &_Base_extent) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_positive (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_nonnegative (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_valid_extent (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank>
unsigned int _Get_max_mipmap_levels (const extent< _Rank > &_Extent)
 
void _Are_valid_mipmap_parameters (unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
 
template<int _Rank>
extent< _Rank > _Get_extent_at_level_unsafe (const extent< _Rank > &_Base_extent, unsigned int _Level) __GPU
 
template<int _Rank>
extent< _Rank > _Get_extent_at_level (const extent< _Rank > &_Base_extent, unsigned int _Level)
 
void _Is_valid_mipmap_range (unsigned int _Src_view_mipmap_levels, unsigned int _Dst_most_detailed_level, unsigned int _Dst_view_mipmap_levels)
 
template<int _Rank, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, extent< _Rank > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, int _Dim1, int _Dim2, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0, _Dim1, _Dim2 > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, int _Dim1, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0, _Dim1 > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0 > _Compute_domain, const _Kernel_type &_F)
 
void __dp_no_source_func () __GPU_ONLY
 
template<>
extent< 1 > _Get_extent_at_level_unsafe< 1 > (const extent< 1 > &_Base_extent, unsigned int _Level) __GPU
 
template<>
extent< 2 > _Get_extent_at_level_unsafe< 2 > (const extent< 2 > &_Base_extent, unsigned int _Level) __GPU
 
template<>
extent< 3 > _Get_extent_at_level_unsafe< 3 > (const extent< 3 > &_Base_extent, unsigned int _Level) __GPU
 

Variables

const size_t ERROR_MSG_BUFFER_SIZE = 1024
 
_CRTIMP2 const unsigned char _Byte_reverse_table []
 
static const int LOOP_UNROLL_THRESHOLD = 4
 

Typedef Documentation

typedef unsigned char Concurrency::details::_Unit_type
typedef void(__cdecl * Concurrency::details::_UnobservedExceptionHandler) (void)
typedef std::atomic<long> Concurrency::details::atomic_long

Atomics

Enumeration Type Documentation

enum Concurrency::details::_DPC_kernel_func_kind

Enumerator
NON_ALIASED_SHADER
ALIASED_SHADER
NUM_SHADER_VERSIONS

{
    NON_ALIASED_SHADER = 0,   // slot 0
    ALIASED_SHADER = 1,       // slot 1
    NUM_SHADER_VERSIONS = 2
};
enum Concurrency::details::_eInitializeState

Enumerator
_do_not_initialize

{
    _do_not_initialize
};
enum Concurrency::details::_op_kind

Enumerator
opEq
opNeq
opNot
opAssign
opAddEq
opSubEq
opMulEq
opDivEq
opModEq
opAdd
opSub
opMul
opDiv
opMod

{
    // cmp op
    opEq,       // a == b
    opNeq,      // a != b
    // not op
    opNot,      // !a
    // compound assignment
    opAssign,   // a = b
    opAddEq,    // a += b
    opSubEq,    // a -= b
    opMulEq,    // a *= b
    opDivEq,    // a /= b
    opModEq,    // a %= b
    // arithmetic ops
    opAdd,      // c = a + b
    opSub,      // c = a - b
    opMul,      // c = a * b
    opDiv,      // c = a / b
    opMod,      // c = a % b
};
enum Concurrency::details::_Short_vector_base_type_id : unsigned int

Enumerator
_Uint_type
_Int_type
_Float_type
_Unorm_type
_Norm_type
_Double_type
_Invalid_type

{
    _Uint_type = 0,
    _Int_type = 1,
    _Float_type = 2,
    _Unorm_type = 3,
    _Norm_type = 4,
    _Double_type = 5,
    _Invalid_type = 0xFFFFFFFF
};
enum Concurrency::details::_TaskCollectionStatus

Enumerator
_NotComplete
_Completed
_Canceled

{
    _NotComplete,
    _Completed,
    _Canceled
};

enum Concurrency::details::_TaskInliningMode

The enum defines the inlining scheduling policy for ppltasks. Scheduling a chore or a functor with a _TaskInliningMode gives the scheduler a hint on whether or not to apply inline execution.

As an optimization, each option in the enum is assigned an integer value that effectively stands for the maximal inlining depth (threshold) for the current chore; the scheduler compares this threshold with the current context's inlining depth to make the inline decision. If the current context's inlining depth is greater than this threshold, the chore is scheduled on a new context; otherwise it is scheduled inline. The minimal threshold 0 means do not inline; the maximal threshold -1 (0xFFFFFFFF...) means always inline. 16 is a good default inlining threshold determined by experiment.

Enumerator
_NoInline
_DefaultAutoInline
_ForceInline

{
    // Disable inline scheduling
    _NoInline = 0,
    // Let runtime decide whether to do inline scheduling or not
    _DefaultAutoInline = 16,
    // Always do inline scheduling
    _ForceInline = -1,
};
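
A minimal standalone sketch of the decision this description implies; the names and the exact comparison are assumptions for illustration, not the runtime's actual code:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical illustration: each mode value acts as a maximum inlining depth,
    // read as an unsigned threshold so that _ForceInline (-1) wraps to the largest
    // possible value and therefore always allows inlining.
    enum _TaskInliningMode
    {
        _NoInline = 0,            // depth is never below 0, so never inline
        _DefaultAutoInline = 16,  // inline while the current depth is below 16
        _ForceInline = -1         // wraps to SIZE_MAX as unsigned: always inline
    };

    bool _Should_inline(size_t _Current_inline_depth, _TaskInliningMode _Mode)
    {
        return _Current_inline_depth < static_cast<size_t>(_Mode);
    }

    int main()
    {
        std::printf("%d\n", _Should_inline(3, _DefaultAutoInline));   // 1: shallow enough, run inline
        std::printf("%d\n", _Should_inline(20, _DefaultAutoInline));  // 0: too deep, schedule on a new context
        std::printf("%d\n", _Should_inline(5, _NoInline));            // 0: never inline
        return 0;
    }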

Function Documentation

void Concurrency::details::__dp_no_source_func ( )
inline

{
    __dp_no_source_stub();
}

template<typename _InputIterator , typename _Distance >
std::enable_if<std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_InputIterator>::iterator_category>::value>::type Concurrency::details::_Advance_output_iterator ( _InputIterator & _Iter, _Distance _N )

{
    std::advance(_Iter, _N);
}

template<typename _OutputIterator , typename _Distance >
std::enable_if<!std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_OutputIterator>::iterator_category>::value>::type Concurrency::details::_Advance_output_iterator ( _OutputIterator & _Iter, size_t _N )

{
    for (size_t i = 0; i < _N; ++i)
    {
        _Iter++;
    }
}

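A standalone sketch of the same dispatch idea (illustrative code, not the header's): iterators whose category derives from std::input_iterator_tag can use std::advance, while pure output iterators such as std::back_insert_iterator have to be bumped one step at a time:

    #include <iterator>
    #include <type_traits>
    #include <vector>

    template <typename It, typename Distance>
    typename std::enable_if<std::is_base_of<std::input_iterator_tag,
        typename std::iterator_traits<It>::iterator_category>::value>::type
    advance_any(It &iter, Distance n)
    {
        std::advance(iter, n);   // input/forward/random-access iterators
    }

    template <typename It, typename Distance>
    typename std::enable_if<!std::is_base_of<std::input_iterator_tag,
        typename std::iterator_traits<It>::iterator_category>::value>::type
    advance_any(It &iter, Distance n)
    {
        for (Distance i = 0; i < n; ++i)
            ++iter;              // pure output iterators only support step-by-step increment
    }

    int main()
    {
        std::vector<int> v(10);
        auto it = v.begin();
        advance_any(it, 5);                  // picks the std::advance overload

        std::vector<int> sink;
        auto out = std::back_inserter(sink);
        advance_any(out, 3);                 // picks the element-by-element overload

        return 0;
    }
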
void Concurrency::details::_Are_valid_mipmap_parameters ( unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels = 0 )
inline

{
    if (_Most_detailed_mipmap_level >= 32)
    {
        throw runtime_exception("The most detailed mipmap level cannot be greater than or equal to 32", E_INVALIDARG);
    }

    if (_Mipmap_levels > 32)
    {
        throw runtime_exception("The number of mipmap levels cannot be greater than 32", E_INVALIDARG);
    }
}

template<typename _T >
_TypeSelectorAsyncTask Concurrency::details::_AsyncOperationKindSelector ( task< _T > )

_TypeSelectorNoAsync Concurrency::details::_AsyncOperationKindSelector ( ... )

template<int _Old_element_size, int _New_element_size>
int Concurrency::details::_Calculate_reinterpreted_size ( int _Old_size )

{
    int _Total_size = _Old_element_size * _Old_size;
    int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size;

    return _New_size;
}

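A worked example of the rounding-up arithmetic (values chosen for illustration; written standalone because the helper itself is __GPU_ONLY):

    // Reinterpret 10 elements of 4 bytes each as 3-byte elements.
    int _Old_element_size = 4, _New_element_size = 3, _Old_size = 10;
    int _Total_size = _Old_element_size * _Old_size;                           // 40 bytes in total
    int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size; // (40 + 2) / 3 == 14, i.e. ceil(40 / 3)
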
_AMPIMP _Event __cdecl Concurrency::details::_Copy_async_impl ( _In_ _Texture * _Src_tex, const size_t * _Src_offset, unsigned int _Src_mipmap_level, _Out_ _Texture * _Dst_tex, const size_t * _Dst_offset, unsigned int _Dst_mipmap_level, const size_t * _Copy_extent, const size_t * _Preferred_copy_chunk_extent = NULL )

template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > & _Src, array< _Value_type, _Rank > & _Dest )

{
    if (_Src.extent.size() > _Dest.extent.size())
    {
        throw runtime_exception("Invalid _Src argument. _Src size exceeds total size of the _Dest.", E_INVALIDARG);
    }

    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_Src.extent.size() == _Dest.extent.size()) ? _Write_access : _Read_write_access;

    _Buffer_ptr _PBufSrc, _PBufDest;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PBufSrc);
    _Ev = _Ev._Add_event(_Get_access_async(_Dest, _Dest_access_mode, _PBufDest));
    size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PBufSrc->_Get_elem_size();
    return _Ev._Add_continuation(std::function<_Event()>([_PBufSrc, _PBufDest, _NumElemsToCopy]() mutable -> _Event {
        return details::_Copy_impl(_PBufSrc, 0, _PBufDest, 0, _NumElemsToCopy);
    }));
}

template<typename InputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( InputIterator _SrcFirst, InputIterator _SrcLast, array< _Value_type, _Rank > & _Dest )

{
    size_t _NumElemsToCopy = std::distance(_SrcFirst, _SrcLast);
    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_NumElemsToCopy == _Dest.extent.size()) ? _Write_access : _Read_write_access;
    _Buffer_ptr _PDestBuf;
    _Event _Ev = _Get_access_async(_Dest, _Dest_access_mode, _PDestBuf);

    return _Ev._Add_continuation(std::function<_Event()>([_SrcFirst, _SrcLast, _PDestBuf, _NumElemsToCopy]() mutable -> _Event {
        return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _NumElemsToCopy, _PDestBuf, 0);
    }));
}

template<typename OutputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > & _Src, OutputIterator _DestIter )

{
    _Buffer_ptr _PSrcBuf;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PSrcBuf);
    size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size();
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _NumElemsToCopy, _DestIter]() mutable -> _Event {
        return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, 0, _NumElemsToCopy, _DestIter);
    }));
}

template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > & _Src, const array_view< _Value_type, _Rank > & _Dest )

{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    if (_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PSrcBuf);

    // The source accelerator_view is driven by array's master location,
    // therefore we can pass nullptr to avoid unnecessary computation
    auto _AccelInfo = _Get_src_dest_accelerator_view(nullptr, &_DestBufDesc);

    _Ev = _Ev._Add_event(_Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Write_access, _PDestBuf));
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}

template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< const _Value_type, _Rank > & _Src, array< _Value_type, _Rank > & _Dest )

{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    if (_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, &_DestBufDesc);

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);
    _Ev = _Ev._Add_event(_Get_access_async(_Dest, _Write_access, _PDestBuf));
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}

template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< const _Value_type, _Rank > & _Src, const array_view< _Value_type, _Rank > & _Dest )

{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    if ((_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) && _PSrcShape->_Overlaps(_PDestShape)) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, &_DestBufDesc);

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);
    _Ev = _Ev._Add_event(_Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Write_access, _PDestBuf));
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}

template<typename InputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( InputIterator _SrcFirst, InputIterator _SrcLast, const array_view< _Value_type, _Rank > & _Dest )

{
    static_assert(!std::is_const<_Value_type>::value, "Cannot copy to array_view<const _Value_type, _Rank>.");

    size_t _Src_size = std::distance(_SrcFirst, _SrcLast);

    // Source cannot be greater than destination
    if (_Src_size > _Dest.extent.size())
    {
        throw runtime_exception("Number of elements in range between [_SrcFirst, _SrcLast) exceeds total size of the _Dest.", E_INVALIDARG);
    }

#pragma warning( push )
#pragma warning( disable : 4127 ) // Disable warning about constant conditional expression
    // Higher ranks need to have as many elements as in _Dest array_view
    if ((_Rank > 1) && (_Src_size != _Dest.extent.size()))
    {
        throw runtime_exception("For _Rank > 1 the number of elements in range between [_SrcFirst, _SrcLast) has to be equal to total size of the _Dest.", E_INVALIDARG);
    }
#pragma warning( pop )

    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_Src_size == _Dest.extent.size()) ? _Write_access : _Read_write_access;

    // Get read-write access for array_view on cpu_accelerator and take underlying pointer to data
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);

    auto _AccelInfo = _Get_src_dest_accelerator_view(nullptr, &_DestBufDesc);

    _Buffer_ptr _PDestBuf;
    _Event _Ev = _Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Dest_access_mode, _PDestBuf);

    _View_shape_ptr _Dst_shape = _Get_buffer_view_shape(_DestBufDesc);

    // If the _Dst shape is linear then perform a linear copy
    unsigned int _Dst_linear_offset, _Dst_linear_size;
    if (_Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
    {
        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf, _SrcFirst, _SrcLast, _Src_size, _Dst_linear_offset]() mutable -> _Event {
            return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _Src_size, _PDestBuf, _Dst_linear_offset);
        }));
    }
    else
    {
        _View_shape_ptr _Reinterpreted_dst_shape = _Create_reinterpreted_shape(_Dst_shape, _PDestBuf->_Get_elem_size(), sizeof(_Value_type));

        // Source has as many elements as in destination, reshape source to match destination shape
        std::vector<unsigned int> _Src_offset(_Reinterpreted_dst_shape->_Get_rank(), 0);
        _View_shape_ptr _Src_shape = details::_View_shape::_Create_view_shape(_Reinterpreted_dst_shape->_Get_rank(), 0 /* linear offset*/,
                                                                              _Reinterpreted_dst_shape->_Get_view_extent(), _Src_offset.data(),
                                                                              _Reinterpreted_dst_shape->_Get_view_extent());

        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf, _SrcFirst, _Src_shape, _Dst_shape]() mutable -> _Event {
            return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape);
        }));
    }

    return _Ev;
}

template<typename OutputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< _Value_type, _Rank > &  _Src,
OutputIterator  _DestIter 
)
5902 {
5903  // Caller is responsible for passing valid _DestIter
5904 
5905  // Get read access for array_view on cpu_accelerator and take underlying pointer to data
5906  const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
5907 
5908  auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, nullptr);
5909 
5910  _Buffer_ptr _PSrcBuf;
5911  _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);
5912 
5913  // Get source shape
5914  _View_shape_ptr _Src_shape = _Get_buffer_view_shape(_SrcBufDesc);
5915 
5916  // If the _Src_shape is linear then perform a linear copy
5917  unsigned int _Src_linear_offset, _Src_linear_size;
5918  if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size))
5919  {
5920  _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter]() mutable -> _Event {
5921  return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter);
5922  }));
5923  }
5924  else
5925  {
5926  _View_shape_ptr _Reinterpreted_src_shape = _Create_reinterpreted_shape(_Src_shape, _PSrcBuf->_Get_elem_size(), sizeof(_Value_type));
5927 
5928  // Valid destination should have space for as many elements as in source array_view, reshape to match source view shape
5929  std::vector<unsigned int> _Dst_offset(_Reinterpreted_src_shape->_Get_rank(), 0);
5930  _View_shape_ptr _Dst_shape = details::_View_shape::_Create_view_shape(_Reinterpreted_src_shape->_Get_rank(), 0 /* linear offset*/,
5931  _Reinterpreted_src_shape->_Get_view_extent(), _Dst_offset.data(),
5932  _Reinterpreted_src_shape->_Get_view_extent());
5933 
5934  _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_shape, _DestIter, _Dst_shape]() mutable -> _Event {
5935  return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_shape, _DestIter, _Dst_shape);
5936  }));
5937  }
5938 
5939  return _Ev;
5940 }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3457
Definition: amprt.h:85
_AMPIMP std::pair< accelerator_view, accelerator_view > __cdecl _Get_src_dest_accelerator_view(_In_opt_ const _Buffer_descriptor *_SrcBuffDescPtr, _In_opt_ const _Buffer_descriptor *_DestBuffDescPtr)
details::_Reference_counted_obj_ptr< details::_View_shape > _View_shape_ptr
Definition: amprt.h:307
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3463
const _Buffer_descriptor & _Get_buffer_descriptor(const _Array_type &_Array) __GPU
Definition: xxamp.h:1064
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1961
Definition: amprt.h:312
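A minimal caller-side sketch for this source/destination combination, assuming a default accelerator is available; copy_async in amp.h is the public entry point that ultimately relies on this helper for the array_view-to-iterator case:

#include <amp.h>
#include <numeric>
#include <vector>
using namespace concurrency;

void copy_view_to_host_sketch()
{
    std::vector<int> src(16);
    std::iota(src.begin(), src.end(), 0);
    array_view<const int, 1> view(16, src);      // read-only view over host data

    std::vector<int> dst(16);
    // copy_async(array_view, OutputIterator) returns a completion_future;
    // waiting on it corresponds to waiting on the _Event assembled above.
    copy_async(view, dst.begin()).get();
}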
template<typename _Input_iterator , typename _Value_type >
void Concurrency::details::_Copy_data_on_host ( int  _Rank,
_Input_iterator  _Src,
_Out_ _Value_type *  _Dst,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Dst_row_pitch_in_bytes,
size_t  _Dst_depth_pitch_in_bytes,
size_t  _Src_row_pitch,
size_t  _Src_depth_pitch 
)
inline
2568  {
2569  switch(_Rank)
2570  {
2571  case 1:
2572  {
2573  _Input_iterator _End = _Src;
2574  std::advance(_End, _Width);
2575  std::copy(_Src, _End, stdext::make_unchecked_array_iterator(_Dst));
2576  }
2577  break;
2578  case 2:
2579  {
2580  unsigned char *_Dst_ptr = reinterpret_cast<unsigned char *>(_Dst);
2581  _Input_iterator _Src_start = _Src;
2582  for (size_t _I = 0; _I < _Height; _I++)
2583  {
2584  _Input_iterator _Src_end = _Src_start;
2585  std::advance(_Src_end, _Width);
2586 
2587  std::copy(_Src_start, _Src_end, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_Dst_ptr)));
2588 
2589  _Dst_ptr += _Dst_row_pitch_in_bytes;
2590  std::advance(_Src_start, _Src_row_pitch);
2591  }
2592  }
2593  break;
2594  case 3:
2595  {
2596  unsigned char *_Dst_ptr_slice_start = reinterpret_cast<unsigned char *>(_Dst);
2597  _Input_iterator _Src_depth_slice_start = _Src;
2598  for (size_t _I = 0; _I < _Depth; _I++)
2599  {
2600  _Input_iterator _Src_start = _Src_depth_slice_start;
2601  unsigned char *_Dst_ptr = _Dst_ptr_slice_start;
2602 
2603  for (size_t _J = 0; _J < _Height; _J++)
2604  {
2605  _Input_iterator _Src_end = _Src_start;
2606  std::advance(_Src_end, _Width);
2607 
2608  std::copy(_Src_start, _Src_end, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_Dst_ptr)));
2609 
2610  _Dst_ptr += _Dst_row_pitch_in_bytes;
2611  std::advance(_Src_start, _Src_row_pitch);
2612  }
2613 
2614  _Dst_ptr_slice_start += _Dst_depth_pitch_in_bytes;
2615  std::advance(_Src_depth_slice_start, _Src_depth_pitch);
2616  }
2617  }
2618  break;
2619  default:
2620  _ASSERTE(FALSE);
2621  break;
2622  }
2623  }
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
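The rank-2 branch above is a standard row-pitched copy; a self-contained sketch of the same pattern in plain C++ (hypothetical helper name, densely packed source rows assumed):

#include <algorithm>
#include <cstddef>

// Hypothetical illustration of the rank-2 case: copy height rows of width
// elements from a densely packed source into a destination whose rows are
// dst_row_pitch_in_bytes apart (the pitch may include padding).
template<typename T>
void copy_rows_with_pitch(const T* src, T* dst, std::size_t width,
                          std::size_t height, std::size_t dst_row_pitch_in_bytes)
{
    unsigned char* dst_ptr = reinterpret_cast<unsigned char*>(dst);
    for (std::size_t row = 0; row < height; ++row)
    {
        std::copy(src, src + width, reinterpret_cast<T*>(dst_ptr));
        src += width;                        // advance one dense source row
        dst_ptr += dst_row_pitch_in_bytes;   // advance one (possibly padded) destination row
    }
}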
template<typename _Output_iterator , typename _Value_type >
void Concurrency::details::_Copy_data_on_host ( int  _Rank,
const _Value_type *  _Src,
_Output_iterator  _Dst,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Src_row_pitch_in_bytes,
size_t  _Src_depth_pitch_in_bytes,
size_t  _Dst_row_pitch,
size_t  _Dst_depth_pitch 
)
inline
2631  {
2632  switch(_Rank)
2633  {
2634  case 1:
2635  {
2636  const _Value_type * _End = _Src + _Width;
2637  std::copy(stdext::make_unchecked_array_iterator(_Src), stdext::make_unchecked_array_iterator(_End), _Dst);
2638  }
2639  break;
2640  case 2:
2641  {
2642  const unsigned char *_Src_ptr = reinterpret_cast<const unsigned char *>(_Src);
2643  _Output_iterator _Dst_iter = _Dst;
2644  for (size_t _I = 0; _I < _Height; _I++)
2645  {
2646  const _Value_type * _Src_end = reinterpret_cast<const _Value_type*>(_Src_ptr) + _Width;
2647 
2648  std::copy(stdext::make_unchecked_array_iterator(reinterpret_cast<const _Value_type*>(_Src_ptr)), stdext::make_unchecked_array_iterator(_Src_end), _Dst_iter);
2649  std::advance(_Dst_iter, _Dst_row_pitch);
2650  _Src_ptr += _Src_row_pitch_in_bytes;
2651  }
2652  }
2653  break;
2654  case 3:
2655  {
2656  const unsigned char *_Src_ptr_slice_start = reinterpret_cast<const unsigned char *>(_Src);
2657  _Output_iterator _Dst_depth_slice_start = _Dst;
2658  for (size_t _I = 0; _I < _Depth; _I++)
2659  {
2660  _Output_iterator _Dst_iter = _Dst_depth_slice_start;
2661  const unsigned char *_Src_ptr = _Src_ptr_slice_start;
2662 
2663  for (size_t _J = 0; _J < _Height; _J++)
2664  {
2665  const _Value_type * _Src_end = reinterpret_cast<const _Value_type *>(_Src_ptr) + _Width;
2666 
2667  std::copy(stdext::make_unchecked_array_iterator(reinterpret_cast<const _Value_type*>(_Src_ptr)), stdext::make_unchecked_array_iterator(_Src_end), _Dst_iter);
2668 
2669  std::advance(_Dst_iter, _Dst_row_pitch);
2670  _Src_ptr += _Src_row_pitch_in_bytes;
2671  }
2672 
2673  _Src_ptr_slice_start += _Src_depth_pitch_in_bytes;
2674  std::advance(_Dst_depth_slice_start, _Dst_depth_pitch);
2675  }
2676  }
2677  break;
2678  default:
2679  _ASSERTE(FALSE);
2680  break;
2681  }
2682  }
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
_AMPIMP _Event __cdecl Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
size_t  _Src_offset,
_Out_ _Buffer _Dst,
size_t  _Dest_offset,
size_t  _Num_elems,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
template<typename InputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( InputIterator  _SrcFirst,
InputIterator  _SrcLast,
size_t  _NumElemsToCopy,
_Out_ _Buffer _Dst,
size_t  _Dest_offset,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
inline
2768  {
2769  if (_NumElemsToCopy == 0) {
2770  return _Event();
2771  }
2772 
2773  if (_Dst == NULL) {
2774  throw runtime_exception("Failed to copy to buffer.", E_INVALIDARG);
2775  }
2776 
2777 #pragma warning ( push )
2778 #pragma warning ( disable : 6001 ) // Using uninitialized memory '*_Dst'
2779  if (((_NumElemsToCopy * sizeof(_Value_type)) + (_Dest_offset * _Dst->_Get_elem_size())) > (_Dst->_Get_num_elems() * _Dst->_Get_elem_size()))
2780  {
2781  throw runtime_exception("Invalid _Src argument(s). _Src size exceeds total size of the _Dest.", E_INVALIDARG);
2782  }
2783 #pragma warning ( pop )
2784 
2785  _ASSERTE(_NumElemsToCopy == (size_t)(std::distance(_SrcFirst, _SrcLast)));
2786 
2787  // If the dest is host accessible for write then we do the copy on
2788  // accelerator(accelerator::cpu_accelerator).default_view
2789  if (_Dst->_Is_host_accessible(_Write_access))
2790  {
2791  // Lets first map the _Dst buffer
2792  _Event _Ev = _Dst->_Map_buffer_async(_Write_access);
2793 
2794  // The _Dest is accessible on host. We just need to do a std::copy using a raw pointer as OutputIterator
2795  _Buffer_ptr _PDestBuf = _Dst;
2796  _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf,_Dest_offset, _SrcFirst, _SrcLast]() mutable -> _Event
2797  {
2798  _Value_type *_DestPtr = reinterpret_cast<_Value_type*>(reinterpret_cast<char*>(_PDestBuf->_Get_host_ptr()) + (_Dest_offset * _PDestBuf->_Get_elem_size()));
2799  std::copy(_SrcFirst, _SrcLast, stdext::make_unchecked_array_iterator(_DestPtr));
2800 
2801  return _Event();
2802  }));
2803 
2804  return _Ev;
2805  }
2806  else
2807  {
2808  // _Dest is on a device. Lets create a temp staging buffer on the _Dest accelerator_view and copy the input over
2809  // We may create a staging buffer of size smaller than the copy size and in that case we will perform the copy
2810  // as a series of smaller copies
2811  _Buffer_ptr _PDestBuf = _Dst;
2812  size_t _NumElemsToCopyRemaining = _NumElemsToCopy;
2813  size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
2814  if (_PreferredNumElemsToCopyPerChunk == 0) {
2815  // If a preferred copy chunk size was not specified, lets pick one based on the
2816  // size of the copy
2817  _PreferredNumElemsToCopyPerChunk = _Get_preferred_copy_chunk_num_elems(_NumElemsToCopy, sizeof(_Value_type));
2818  }
2819  size_t _CurrDstOffset = _Dest_offset;
2820  InputIterator _CurrStartIter = _SrcFirst;
2821  _Event _Ev;
2822 
2823  size_t _Lcm = _Least_common_multiple(_Dst->_Get_elem_size(), sizeof(_Value_type));
2824  size_t _AdjustmentRatio = _Lcm / sizeof(_Value_type);
2825 
2826  do
2827  {
2828  size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
2829  if (_NumElemsToCopyRemaining < _AllocationNumElems) {
2830  _AllocationNumElems = _NumElemsToCopyRemaining;
2831  }
2832 
2833  _Buffer_ptr _PDestStagingBuf = _Buffer::_Get_temp_staging_buffer(_Dst->_Get_accelerator_view(),
2834  _AllocationNumElems, sizeof(_Value_type));
2835 
2836  _ASSERTE(_PDestStagingBuf != NULL);
2837  _ASSERTE(_PDestStagingBuf->_Get_elem_size() == sizeof(_Value_type));
2838 
2839  InputIterator _CurrEndIter = _CurrStartIter;
2840  size_t _CurrNumElemsToCopy = _AllocationNumElems;
2841  if (_CurrNumElemsToCopy > _PDestStagingBuf->_Get_num_elems()) {
2842  _CurrNumElemsToCopy = _PDestStagingBuf->_Get_num_elems();
2843  }
2844 
2845  if (_NumElemsToCopyRemaining <= _CurrNumElemsToCopy) {
2846  _CurrNumElemsToCopy = _NumElemsToCopyRemaining;
2847  _CurrEndIter = _SrcLast;
2848  }
2849  else
2850  {
2851  // We need to adjust the _CurrNumElemsToCopy to be a multiple of the
2852  // least common multiple of the destination buffer's element size and sizeof(_Value_type).
2853  _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
2854  std::advance(_CurrEndIter, _CurrNumElemsToCopy);
2855  }
2856 
2857  _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);
2858 
2859  // This should never actually block since we just created this staging buffer or are using
2860  // a cached one that is not in use
2861  _PDestStagingBuf->_Map_buffer(_Write_access, true /* _Wait */);
2862 
2863  // Copy from input to the staging using a raw pointer as OutputIterator
2864  std::copy(_CurrStartIter, _CurrEndIter, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_PDestStagingBuf->_Get_host_ptr())));
2865 
2866  _Ev = _Ev._Add_event(_PDestStagingBuf->_Copy_to_async(_PDestBuf, _CurrNumElemsToCopy, 0, _CurrDstOffset));
2867 
2868  // Adjust the iterators and offsets
2869  _NumElemsToCopyRemaining -= _CurrNumElemsToCopy;
2870  _CurrDstOffset += (_CurrNumElemsToCopy * sizeof(_Value_type)) / _Dst->_Get_elem_size();
2871  _CurrStartIter = _CurrEndIter;
2872 
2873  } while (_NumElemsToCopyRemaining != 0);
2874 
2875  return _Ev;
2876  }
2877  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
#define NULL
Definition: crtdbg.h:30
iterator_traits< _InIt >::difference_type distance(_InIt _First, _InIt _Last)
Definition: xutility:755
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev
#define _ASSERTE(expr)
Definition: crtdbg.h:216
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
Definition: amprt.h:312
Definition: amprt.h:86
_T _Least_common_multiple(_T _M, _T _N)
Definition: amprt.h:2755
size_t _Get_preferred_copy_chunk_num_elems(size_t _Total_num_elems, size_t _Elem_size)
Definition: amprt.h:2686
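The chunk-size adjustment inside the loop above can be isolated into a small sketch (illustrative names and values only):

#include <cstddef>

// Illustrative only: mirror of the _AdjustmentRatio logic above. A chunk of
// value_size-byte elements is rounded down so that its total byte size is a
// multiple of both value_size and the destination buffer's element size.
std::size_t adjust_chunk_elems(std::size_t chunk_elems,
                               std::size_t value_size, std::size_t dest_elem_size)
{
    std::size_t a = value_size, b = dest_elem_size;
    while (b != 0) { std::size_t t = a % b; a = b; b = t; }   // gcd (Euclid)
    std::size_t lcm = (value_size / a) * dest_elem_size;
    std::size_t ratio = lcm / value_size;                     // elements per aligned unit
    return (chunk_elems / ratio) * ratio;
}
// e.g. adjust_chunk_elems(1001, 4, 16) gives ratio 4 and result 1000.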
template<typename OutputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
size_t  _Src_offset,
size_t  _Num_elems,
OutputIterator  _DestIter,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
inline
2902  {
2903  if ((_Src == NULL) || ((_Src_offset + _Num_elems) > _Src->_Get_num_elems())) {
2904  throw runtime_exception("Failed to copy to buffer.", E_INVALIDARG);
2905  }
2906 
2907  if (_Num_elems == 0) {
2908  return _Event();
2909  }
2910 
2911  size_t _NumElemsToCopy = (_Num_elems * _Src->_Get_elem_size()) / sizeof(_Value_type);
2912 
2913  // If the src is host accessible for read then we do the copy on
2914  // accelerator(accelerator::cpu_accelerator).default_view
2915  if (_Src->_Is_host_accessible(_Read_access))
2916  {
2917  // Map the _Src buffer
2918  _Event _Ev = _Src->_Map_buffer_async(_Read_access);
2919 
2920  // The _Src is accessible on host. We just need to do a std::copy using a raw pointer as OutputIterator
2921  _Buffer_ptr _PSrcBuf = _Src;
2922  _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_offset, _DestIter, _NumElemsToCopy]() mutable -> _Event
2923  {
2924  // The _Src is accessible on host. We just need to do a std::copy
2925  const _Value_type *_PFirst = reinterpret_cast<const _Value_type*>(reinterpret_cast<char*>(_PSrcBuf->_Get_host_ptr()) + (_Src_offset * _PSrcBuf->_Get_elem_size()));
2926  std::copy(_PFirst, _PFirst + _NumElemsToCopy, _DestIter);
2927 
2928  return _Event();
2929  }));
2930 
2931  return _Ev;
2932  }
2933  else
2934  {
2935  // The _Src is on the device. We need to copy it out to a temporary staging array
2936  // We may create a staging buffer of size smaller than the copy size and in that case we will
2937  // perform the copy as a series of smaller copies
2938 
2939  _Event _Ev;
2940 
2941  _Buffer_ptr _PSrcBuf = _Src;
2942  size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
2943  if (_PreferredNumElemsToCopyPerChunk == 0) {
2944  // If a preferred copy chunk size was not specified, lets pick one based on the
2945  // size of the copy
2946  _PreferredNumElemsToCopyPerChunk = _Get_preferred_copy_chunk_num_elems(_NumElemsToCopy, sizeof(_Value_type));
2947  }
2948 
2949  size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
2950  if (_NumElemsToCopy < _AllocationNumElems) {
2951  _AllocationNumElems = _NumElemsToCopy;
2952  }
2953 
2954  _Buffer_ptr _PSrcStagingBuf = _Buffer::_Get_temp_staging_buffer(_Src->_Get_accelerator_view(),
2955  _AllocationNumElems, sizeof(_Value_type));
2956 
2957  _ASSERTE(_PSrcStagingBuf != NULL);
2958  _ASSERTE(_PSrcStagingBuf->_Get_elem_size() == sizeof(_Value_type));
2959 
2960  // The total byte size of a copy chunk must be an integral multiple of both the
2961  // source buffer's element size and sizeof(_Value_type).
2962  size_t _Lcm = _Least_common_multiple(_Src->_Get_elem_size(), sizeof(_Value_type));
2963  size_t _AdjustmentRatio = _Lcm / sizeof(_Value_type);
2964 
2965  size_t _CurrNumElemsToCopy = _AllocationNumElems;
2966  if (_CurrNumElemsToCopy > _PSrcStagingBuf->_Get_num_elems()) {
2967  _CurrNumElemsToCopy = _PSrcStagingBuf->_Get_num_elems();
2968  }
2969  if (_NumElemsToCopy <= _CurrNumElemsToCopy)
2970  {
2971  _CurrNumElemsToCopy = _NumElemsToCopy;
2972  }
2973  else
2974  {
2975  // We need to adjust the _StagingBufNumElems to be a multiple of the
2976  // least common multiple of the source buffer's element size and sizeof(_Value_type).
2977  _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
2978  }
2979 
2980  _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);
2981 
2982  size_t _NumElemsToCopyRemaining = _NumElemsToCopy - _CurrNumElemsToCopy;
2983 
2984  _Ev = _PSrcBuf->_Copy_to_async(_PSrcStagingBuf, (_CurrNumElemsToCopy * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(), _Src_offset, 0);
2985 
2986  if (_NumElemsToCopyRemaining != 0)
2987  {
2988  _Ev = _Ev._Add_continuation(std::function<_Event()>([_DestIter, _PSrcBuf, _PSrcStagingBuf,
2989  _CurrNumElemsToCopy, _NumElemsToCopyRemaining,
2990  _Src_offset, _PreferredNumElemsToCopyPerChunk]() mutable -> _Event
2991  {
2992  // Initiate an asynchronous copy of the remaining part so that this part of the copy
2993  // makes progress while we consummate the copying of the first part
2994  size_t _CurrSrcOffset = _Src_offset + ((_CurrNumElemsToCopy * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size());
2995  OutputIterator _CurrDestIter = _DestIter;
2996  _Advance_output_iterator<decltype(_CurrDestIter), size_t>(_CurrDestIter, _CurrNumElemsToCopy);
2997  _Event _Ret_ev = _Copy_impl<OutputIterator, _Value_type>(_PSrcBuf._Get_ptr(), _CurrSrcOffset,
2998  (_NumElemsToCopyRemaining * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(),
2999  _CurrDestIter, _PreferredNumElemsToCopyPerChunk);
3000 
3001  // Now copy the data from staging buffer to the destination
3002  _Value_type *_PFirst = reinterpret_cast<_Value_type*>(_PSrcStagingBuf->_Get_host_ptr());
3003  std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
3004  return _Ret_ev;
3005  }));
3006  }
3007  else
3008  {
3009  _Ev = _Ev._Add_continuation(std::function<_Event()>([_DestIter, _PSrcStagingBuf, _CurrNumElemsToCopy]() mutable -> _Event
3010  {
3011  _Value_type *_PFirst = reinterpret_cast<_Value_type*>(_PSrcStagingBuf->_Get_host_ptr());
3012  std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
3013  return _Event();
3014  }));
3015  }
3016 
3017  return _Ev;
3018  }
3019  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
Definition: amprt.h:85
#define NULL
Definition: crtdbg.h:30
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
Definition: amprt.h:312
_T _Least_common_multiple(_T _M, _T _N)
Definition: amprt.h:2755
size_t _Get_preferred_copy_chunk_num_elems(size_t _Total_num_elems, size_t _Elem_size)
Definition: amprt.h:2686
_AMPIMP _Event __cdecl Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
_View_shape_ptr  _Src_shape,
_Out_ _Buffer _Dst,
_View_shape_ptr  _Dst_shape 
)
template<typename InputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( InputIterator  _SrcFirst,
_View_shape_ptr  _Src_shape,
_Inout_ _Buffer _Dst,
_View_shape_ptr  _Dst_shape 
)
inline
3086  {
3087  _ASSERTE(_Dst != NULL);
3088  _ASSERTE(_Src_shape != NULL);
3089  _ASSERTE(_Dst_shape != NULL);
3090 
3091  if (_Src_shape->_Is_projection()) {
3092  _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
3093  }
3094 
3095  if (_Dst_shape->_Is_projection()) {
3096  _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
3097  }
3098 
3099  _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
3100 
3101  _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
3102  sizeof(_Value_type), _Dst_shape->_Get_view_extent(), _Dst->_Get_elem_size()));
3103 
3104  if (_Dst->_Is_host_accessible(_Write_access))
3105  {
3106  // The destination buffer is accessible on the host. Map the _Dst buffer
3107  _Event _Ev = _Dst->_Map_buffer_async(_Write_access);
3108  _Buffer_ptr _PDestBuf = _Dst;
3109  return _Ev._Add_continuation(std::function<_Event()>([_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape]() mutable -> _Event {
3110  return _Copy_impl_iter(_SrcFirst, _Src_shape, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_PDestBuf->_Get_host_ptr())),
3111  _Create_reinterpreted_shape(_Dst_shape, _PDestBuf->_Get_elem_size(), sizeof(_Value_type)));
3112  }));
3113  }
3114  else
3115  {
3116  // The dest buffer is not accessible on the host. Let's create a temporary
3117  // staging buffer on the destination buffer's accelerator_view
3118  _Buffer_ptr _PTempStagingBuf = _Buffer::_Create_stage_buffer(_Dst->_Get_accelerator_view(), accelerator(accelerator::cpu_accelerator).default_view,
3119  _Src_shape->_Get_view_size(), sizeof(_Value_type), true /* _Is_temp */);
3120 
3121  _PTempStagingBuf->_Map_buffer(_Write_access, true /* _Wait */);
3122  _Value_type *_Dst_ptr = reinterpret_cast<_Value_type*>(_PTempStagingBuf->_Get_host_ptr());
3123  _Event _Ev = _Copy_impl_iter(_SrcFirst, _Src_shape, stdext::make_unchecked_array_iterator(_Dst_ptr), _Src_shape);
3124 
3125  // Now copy from the staging buffer to the destination buffer
3126  _Buffer_ptr _PDestBuf = _Dst;
3127  return _Ev._Add_continuation(std::function<_Event()>([_PTempStagingBuf, _Src_shape, _PDestBuf, _Dst_shape]() mutable -> _Event {
3128  return _Copy_impl(_PTempStagingBuf, _Src_shape, _PDestBuf, _Dst_shape);
3129  }));
3130  }
3131  }
_Event _Copy_impl(_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, OutputIterator _DestIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3134
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
#define NULL
Definition: crtdbg.h:30
_Event _Copy_impl_iter(_InputInterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3183
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1961
Definition: amprt.h:312
Definition: amprt.h:86
template<typename OutputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
_View_shape_ptr  _Src_shape,
OutputIterator  _DestIter,
_View_shape_ptr  _Dst_shape 
)
inline
3135  {
3136  _ASSERTE(_Src != NULL);
3137  _ASSERTE(_Src_shape != NULL);
3138  _ASSERTE(_Dst_shape != NULL);
3139 
3140  if (_Src_shape->_Is_projection()) {
3141  _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
3142  }
3143 
3144  if (_Dst_shape->_Is_projection()) {
3145  _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
3146  }
3147 
3148  _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
3149 
3150  _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
3151  _Src->_Get_elem_size(), _Dst_shape->_Get_view_extent(), sizeof(_Value_type)));
3152 
3153  if (_Src->_Is_host_accessible(_Read_access))
3154  {
3155  // The source buffer is accessible on the host. Map the _Src buffer
3156  _Event _Ev = _Src->_Map_buffer_async(_Read_access);
3157 
3158  _Buffer_ptr _PSrcBuf = _Src;
3159  return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_shape, _DestIter, _Dst_shape]() mutable -> _Event {
3160  return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PSrcBuf->_Get_host_ptr()),
3161  _Create_reinterpreted_shape(_Src_shape, _PSrcBuf->_Get_elem_size(), sizeof(_Value_type)),
3162  _DestIter, _Dst_shape);
3163  }));
3164  }
3165  else
3166  {
3167  // The source buffer is not accessible on host. Lets create a temporary
3168  // staging buffer on the source buffer's accelerator_view and initiate a copy
3169  // from the source buffer to the temporary staging buffer
3170  _Buffer_ptr _PTempStagingBuf = _Buffer::_Create_stage_buffer(_Src->_Get_accelerator_view(), accelerator(accelerator::cpu_accelerator).default_view,
3171  _Dst_shape->_Get_view_size(), sizeof(_Value_type), true);
3172 
3173  _Event _Ev = _Src->_Copy_to_async(_PTempStagingBuf, _Src_shape, _Dst_shape);
3174  return _Ev._Add_continuation(std::function<_Event()>([_PTempStagingBuf, _Dst_shape, _DestIter]() mutable -> _Event {
3175  return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PTempStagingBuf->_Get_host_ptr()),
3176  _Dst_shape, _DestIter, _Dst_shape);
3177  }));
3178  }
3179  }
details::_Reference_counted_obj_ptr< details::_Buffer > _Buffer_ptr
Definition: amprt.h:302
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
Definition: amprt.h:85
#define NULL
Definition: crtdbg.h:30
_Event _Copy_impl_iter(_InputInterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3183
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1961
Definition: amprt.h:312
template<typename _InputInterator , typename _OutputIterator >
_Event Concurrency::details::_Copy_impl_iter ( _InputInterator  _SrcFirst,
_InputInterator  _SrcLast,
_OutputIterator  _DstFirst 
)
inline
3078  {
3079  std::copy(_SrcFirst, _SrcLast, _DstFirst);
3080  return _Event();
3081  }
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
Definition: amprt.h:312
template<typename _InputInterator , typename _OutputIterator >
_Event Concurrency::details::_Copy_impl_iter ( _InputInterator  _SrcIter,
_View_shape_ptr  _Src_shape,
_OutputIterator  _DstIter,
_View_shape_ptr  _Dst_shape 
)
inline
3185  {
3186  if (_Src_shape->_Is_projection()) {
3187  _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
3188  }
3189 
3190  if (_Dst_shape->_Is_projection()) {
3191  _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
3192  }
3193 
3194  _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
3195  _ASSERTE(_View_shape::_Compare_extent(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(), _Dst_shape->_Get_view_extent()));
3196 
3197  // If both the _Src_shape and _Dst_shape are linear we can be more efficient
3198  unsigned int _Src_linear_offset, _Src_linear_size, _Dst_linear_offset, _Dst_linear_size;
3199  if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size) &&
3200  _Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
3201  {
3202  _ASSERTE(_Src_linear_size == _Dst_linear_size);
3203 
3204  // These iterators might not be contiguous, therefore we use std::advance
3205  std::advance(_SrcIter, _Src_linear_offset);
3206  auto _SrcLast = _SrcIter;
3207  std::advance(_SrcLast, _Src_linear_size);
3208  std::advance(_DstIter, _Dst_linear_offset);
3209 
3210  return _Copy_impl_iter(_SrcIter, _SrcLast, _DstIter);
3211  }
3212 
3213  std::vector<unsigned int> _Src_extent(_Src_shape->_Get_rank());
3214  std::vector<unsigned int> _Src_offset(_Src_shape->_Get_rank());
3215  std::vector<unsigned int> _Dst_extent(_Dst_shape->_Get_rank());
3216  std::vector<unsigned int> _Dst_offset(_Dst_shape->_Get_rank());
3217  std::vector<unsigned int> _Copy_extent(_Src_shape->_Get_rank());
3218 
3219  for (size_t i = 0; i < _Src_shape->_Get_rank(); ++i) {
3220  _Src_extent[i] = _Src_shape->_Get_base_extent()[i];
3221  _Src_offset[i] = _Src_shape->_Get_view_offset()[i];
3222  _Dst_extent[i] = _Dst_shape->_Get_base_extent()[i];
3223  _Dst_offset[i] = _Dst_shape->_Get_view_offset()[i];
3224  _Copy_extent[i] = _Src_shape->_Get_view_extent()[i];
3225  }
3226 
3227  _Array_copy_desc _Desc(
3228  _Src_shape->_Get_rank(),
3229  _Src_shape->_Get_linear_offset(),
3230  _Src_extent.data(),
3231  _Src_offset.data(),
3232  _Dst_shape->_Get_linear_offset(),
3233  _Dst_extent.data(),
3234  _Dst_offset.data(),
3235  _Copy_extent.data());
3236 
3237  // Note: Capturing shape pointers would be incorrect, they are valid for setting up the call.
3238  // They might be deleted right after this call completes.
3239  HRESULT hr = _Recursive_array_copy(_Desc, 1, [_SrcIter, _DstIter](const _Array_copy_desc &_Reduced) -> HRESULT {
3240 
3241  auto _SrcFirst = _SrcIter;
3242  auto _DstFirst = _DstIter;
3243 
3244  std::advance(_DstFirst, _Reduced._Dst_linear_offset + _Reduced._Dst_copy_offset[0]);
3245  std::advance(_SrcFirst, _Reduced._Src_linear_offset + _Reduced._Src_copy_offset[0]);
3246  auto _SrcLast = _SrcFirst;
3247  std::advance(_SrcLast, _Reduced._Copy_extents[0]);
3248 
3249  std::copy(_SrcFirst, _SrcLast, _DstFirst);
3250 
3251  return S_OK;
3252  });
3253 
3254  if (FAILED(hr)) {
3255  throw Concurrency::runtime_exception("Failed to copy between buffers", E_FAIL);
3256  }
3257 
3258  return _Event();
3259  }
std::vector< unsigned int > _Copy_extents
Definition: amprt.h:3064
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
#define S_OK
Definition: comutil.h:62
std::vector< unsigned int > _Dst_copy_offset
Definition: amprt.h:3061
_Event _Copy_impl_iter(_InputInterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
Definition: amprt.h:3183
int i[4]
Definition: dvec.h:70
#define _ASSERTE(expr)
Definition: crtdbg.h:216
#define FAILED(hr)
Definition: comutil.h:71
std::vector< unsigned int > _Src_copy_offset
Definition: amprt.h:3056
unsigned int _Dst_linear_offset
Definition: amprt.h:3059
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt.h:835
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
unsigned int _Src_linear_offset
Definition: amprt.h:3054
Definition: amprt.h:312
_AMPIMP HRESULT __cdecl _Recursive_array_copy(const _Array_copy_desc &_Desc, unsigned int _Native_copy_rank, std::function< HRESULT(const _Array_copy_desc &_Reduced)> _Native_copy_func)
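For intuition, the non-linear path above reduces an N-dimensional copy to a series of rank-1 copies; a self-contained rank-2 sketch of the equivalent strided loop (hypothetical helper, row-major base allocations assumed):

#include <algorithm>
#include <cstddef>
#include <iterator>

// Illustrative rank-2 equivalent of the copy performed via _Recursive_array_copy:
// copy a rows-by-cols window between two row-major base allocations with
// different base widths and window offsets.
template<typename InIt, typename OutIt>
void strided_copy_2d(InIt src, std::size_t src_base_cols, std::size_t src_row0, std::size_t src_col0,
                     OutIt dst, std::size_t dst_base_cols, std::size_t dst_row0, std::size_t dst_col0,
                     std::size_t rows, std::size_t cols)
{
    for (std::size_t r = 0; r < rows; ++r)
    {
        InIt  s = src;  std::advance(s, (src_row0 + r) * src_base_cols + src_col0);
        OutIt d = dst;  std::advance(d, (dst_row0 + r) * dst_base_cols + dst_col0);
        InIt  e = s;    std::advance(e, cols);
        std::copy(s, e, d);    // one rank-1 copy per row, as in the lambda above
    }
}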
_Ret_ _View_shape* Concurrency::details::_Create_reinterpreted_shape ( const _View_shape _Source_shape,
size_t  _Curr_elem_size,
size_t  _New_elem_size 
)
inline
1962  {
1963  unsigned int _Rank = _Source_shape->_Get_rank();
1964  size_t _LinearOffsetInBytes = _Source_shape->_Get_linear_offset() * _Curr_elem_size;
1965  size_t _BaseLSDExtentInBytes = (_Source_shape->_Get_base_extent())[_Rank - 1] * _Curr_elem_size;
1966  size_t _ViewLSDOffsetInBytes = (_Source_shape->_Get_view_offset())[_Rank - 1] * _Curr_elem_size;
1967  size_t _ViewLSDExtentInBytes = (_Source_shape->_Get_view_extent())[_Rank - 1] * _Curr_elem_size;
1968 
1969  _ASSERTE((_LinearOffsetInBytes % _New_elem_size) == 0);
1970  _ASSERTE((_BaseLSDExtentInBytes % _New_elem_size) == 0);
1971  _ASSERTE((_ViewLSDOffsetInBytes % _New_elem_size) == 0);
1972  _ASSERTE((_ViewLSDExtentInBytes % _New_elem_size) == 0);
1973 
1974  size_t _Temp_val = _LinearOffsetInBytes / _New_elem_size;
1975  _ASSERTE(_Temp_val <= UINT_MAX);
1976  unsigned int _New_linear_offset = static_cast<unsigned int>(_Temp_val);
1977 
1978  std::vector<unsigned int> _New_base_extent(_Rank);
1979  std::vector<unsigned int> _New_view_offset(_Rank);
1980  std::vector<unsigned int> _New_view_extent(_Rank);
1981  for (unsigned int i = 0; i < _Rank - 1; ++i) {
1982  _New_base_extent[i] = (_Source_shape->_Get_base_extent())[i];
1983  _New_view_offset[i] = (_Source_shape->_Get_view_offset())[i];
1984  _New_view_extent[i] = (_Source_shape->_Get_view_extent())[i];
1985  }
1986 
1987  // The extent in the least significant dimension needs to be adjusted
1988  _Temp_val = _BaseLSDExtentInBytes / _New_elem_size;
1989  _ASSERTE(_Temp_val <= UINT_MAX);
1990  _New_base_extent[_Rank - 1] = static_cast<unsigned int>(_Temp_val);
1991 
1992  _Temp_val = _ViewLSDOffsetInBytes / _New_elem_size;
1993  _ASSERTE(_Temp_val <= UINT_MAX);
1994  _New_view_offset[_Rank - 1] = static_cast<unsigned int>(_Temp_val);
1995 
1996  _Temp_val = _ViewLSDExtentInBytes / _New_elem_size;
1997  _ASSERTE(_Temp_val <= UINT_MAX);
1998  _New_view_extent[_Rank - 1] = static_cast<unsigned int>(_Temp_val);
1999 
2000  return _View_shape::_Create_view_shape(_Rank, _New_linear_offset, _New_base_extent.data(), _New_view_offset.data(), _New_view_extent.data());
2001  }
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1677
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1682
#define UINT_MAX
Definition: limits.h:41
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1686
int i[4]
Definition: dvec.h:70
#define _ASSERTE(expr)
Definition: crtdbg.h:216
unsigned int _Get_linear_offset() const
Definition: amprt.h:1672
unsigned int _Get_rank() const
Definition: amprt.h:1667
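A worked example, with values chosen purely for illustration: reinterpreting a rank-2 shape of 4-byte elements whose base extent is {3, 8}, view offset {0, 2}, view extent {3, 4} and linear offset 8 as 1-byte elements scales only the least significant dimension, producing base extent {3, 32}, view offset {0, 8}, view extent {3, 16} and linear offset 32; the asserts above guarantee that each scaled byte quantity divides evenly by the new element size.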
template<class _Tuple_type >
_Tuple_type Concurrency::details::_Create_uninitialized_tuple ( )
215  {
216  return _Tuple_type(details::_do_not_initialize);
217  };
template<typename _Ty >
auto Concurrency::details::_FilterValidTaskType ( _Ty  _Param,
int   
) -> decltype(_GetTaskType(_Param, _IsCallable(_Param, 0)))
template<typename _Ty >
_BadArgType Concurrency::details::_FilterValidTaskType ( _Ty  _Param,
  ... 
)
_Ret_ _Accelerator_impl * Concurrency::details::_Get_accelerator_impl_ptr ( const accelerator &  _Accl)
inline
1638  {
1639  return _Accl._M_impl;
1640  }
_Ret_ _Accelerator_view_impl * Concurrency::details::_Get_accelerator_view_impl_ptr ( const accelerator_view &  _Accl_view)
inline
1633  {
1634  return _Accl_view._M_impl;
1635  }
_Event Concurrency::details::_Get_access_async ( const _View_key  _Key,
accelerator_view  _Av,
_Access_mode  _Mode,
_Buffer_ptr &  _Buf_ptr 
)
inline
3458  {
3459  return _Key->_Get_buffer_ptr()->_Get_access_async(_Key->_Get_view_key(), _Av, _Mode, _Buf_ptr);
3460  }
_Check_return_ _In_ int _Mode
Definition: io.h:338
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
friend _Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3457
template<typename _Array_type >
_Event Concurrency::details::_Get_access_async ( const _Array_type &  _Array,
_Access_mode  _Mode,
_Buffer_ptr &  _Buf_ptr 
)
1081  {
1082  return _Array._Get_access_async(_Mode, _Buf_ptr);
1083  }
_Check_return_ _In_ int _Mode
Definition: io.h:338
_AMPIMP _Ret_ _Amp_runtime_trace* __cdecl Concurrency::details::_Get_amp_trace ( )
template<typename _Array_type >
_Ret_ _Ubiquitous_buffer* Concurrency::details::_Get_buffer ( const _Array_type &  _Array)
1071  {
1072  return _Array._Get_buffer();
1073  }
template<typename _Array_type >
const _Buffer_descriptor& Concurrency::details::_Get_buffer_descriptor ( const _Array_type &  _Array)
1065  {
1066  return _Array._M_buffer_descriptor;
1067  }
_Ret_ _View_shape* Concurrency::details::_Get_buffer_view_shape ( const _Buffer_descriptor _Descriptor)
inline
3464  {
3465  return _Descriptor._Get_buffer_ptr()->_Get_view_shape(_Descriptor._Get_view_key());
3466  }
_AMPIMP _View_shape_ptr _Get_view_shape(_In_ _View_key _Key)
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
bool Concurrency::details::_Get_chunked_staging_texture ( _In_ _Texture _Tex,
const size_t _Copy_chunk_extent,
_Inout_ size_t _Remaining_copy_extent,
_Out_ size_t _Curr_copy_extent,
_Out_ _Texture_ptr *  _Staging_texture 
)
inline
2520  {
2521  bool _Truncated_copy = false;
2522  size_t _Allocation_extent[3] = { _Copy_chunk_extent[0], _Copy_chunk_extent[1], _Copy_chunk_extent[2] };
2523 
2524  unsigned int _Most_sig_idx = _Tex->_Get_rank() - 1;
2525 
2526  if (_Allocation_extent[_Most_sig_idx] > _Remaining_copy_extent[_Most_sig_idx]) {
2527  _Allocation_extent[_Most_sig_idx] = _Remaining_copy_extent[_Most_sig_idx];
2528  }
2529 
2530  _Texture_ptr _Stage = _Texture::_Get_temp_staging_texture(_Tex->_Get_accelerator_view(), _Tex->_Get_rank(),
2531  _Allocation_extent[0], _Allocation_extent[1], _Allocation_extent[2],
2532  /*_Mip_levels=*/1, _Tex->_Get_texture_format());
2533 
2534  std::copy(&_Allocation_extent[0], &_Allocation_extent[3], stdext::make_unchecked_array_iterator(&_Curr_copy_extent[0]));
2535  size_t _Staging_tex_extent[3] = {_Stage->_Get_width(), _Stage->_Get_height(), _Stage->_Get_depth()};
2536  if (_Curr_copy_extent[_Most_sig_idx] > _Staging_tex_extent[_Most_sig_idx]) {
2537  _Curr_copy_extent[_Most_sig_idx] = _Staging_tex_extent[_Most_sig_idx];
2538  }
2539 
2540  // The truncation can however happen only in the most significant dimension and lower
2541  // dimensions should not get truncated
2542  if (_Curr_copy_extent[_Most_sig_idx] < _Remaining_copy_extent[_Most_sig_idx])
2543  {
2544  _Remaining_copy_extent[_Most_sig_idx] -= _Curr_copy_extent[_Most_sig_idx];
2545  _Truncated_copy = true;
2546  }
2547 
2548  for (unsigned int _I = 0; _I < _Most_sig_idx; _I++)
2549  {
2550  _ASSERTE(_Curr_copy_extent[_I] == _Remaining_copy_extent[_I]);
2551  }
2552 
2553  *_Staging_texture = _Stage;
2554  return _Truncated_copy;
2555  }
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
details::_Reference_counted_obj_ptr< details::_Texture > _Texture_ptr
Definition: amprt.h:303
#define _ASSERTE(expr)
Definition: crtdbg.h:216
access_type Concurrency::details::_Get_cpu_access_type ( _Access_mode  _Cpu_access_mode)
inline
2022  {
2023  access_type _Cpu_access_type = access_type_none;
2024  if (_Cpu_access_mode & _Read_access) {
2025  _Cpu_access_type = static_cast<access_type>(_Cpu_access_type | access_type_read);
2026  }
2027 
2028  if (_Cpu_access_mode & _Write_access) {
2029  _Cpu_access_type = static_cast<access_type>(_Cpu_access_type | access_type_write);
2030  }
2031 
2032  return _Cpu_access_type;
2033  }
Definition: amprt.h:85
Definition: amprt.h:101
Definition: amprt.h:99
Definition: amprt.h:100
Definition: amprt.h:86
access_type
Enumeration type used to denote the various types of access to data.
Definition: amprt.h:97
template<typename _Array_type >
void* Concurrency::details::_Get_datasource_identity ( const _Array_type &  _Array)
1089  {
1090  return _Get_buffer_descriptor(_Array)._Get_buffer_ptr()._Get_ptr();
1091  }
const _Buffer_descriptor & _Get_buffer_descriptor(const _Array_type &_Array) __GPU
Definition: xxamp.h:1064
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
_AMPIMP _Ret_ _Accelerator_impl_ptr* __cdecl Concurrency::details::_Get_devices ( )
template<int _Rank>
extent< _Rank > Concurrency::details::_Get_extent_at_level ( const extent< _Rank > &  _Base_extent,
unsigned int  _Level 
)
inline
142 {
144  return _Get_extent_at_level_unsafe(_Base_extent, _Level);
145 }
void _Are_valid_mipmap_parameters(unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
Definition: xxamp.h:1255
extent< _Rank > _Get_extent_at_level_unsafe(const extent< _Rank > &_Base_extent, unsigned int _Level) __GPU
Definition: xxamp_inl.h:95
template<int _Rank>
extent< _Rank > Concurrency::details::_Get_extent_at_level_unsafe ( const extent< _Rank > &  _Base_extent,
unsigned int  _Level 
)
inline
96 {
97  static_assert(_Rank >= 3);
98 }
template<>
extent<1> Concurrency::details::_Get_extent_at_level_unsafe< 1 > ( const extent< 1 > &  _Base_extent,
unsigned int  _Level 
)
inline
102 {
103  extent<1> _Extent_at_level(_Base_extent);
104 
105  _Extent_at_level[0] >>= _Level;
106  _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;
107 
108  return _Extent_at_level;
109 }
template<>
extent<2> Concurrency::details::_Get_extent_at_level_unsafe< 2 > ( const extent< 2 > &  _Base_extent,
unsigned int  _Level 
)
inline
113 {
114  extent<2> _Extent_at_level(_Base_extent);
115 
116  _Extent_at_level[0] >>= _Level;
117  _Extent_at_level[1] >>= _Level;
118  _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;
119  _Extent_at_level[1] = _Extent_at_level[1] ? _Extent_at_level[1] : 1;
120 
121  return _Extent_at_level;
122 }
template<>
extent<3> Concurrency::details::_Get_extent_at_level_unsafe< 3 > ( const extent< 3 > &  _Base_extent,
unsigned int  _Level 
)
inline
126 {
127  extent<3> _Extent_at_level(_Base_extent);
128 
129  _Extent_at_level[0] >>= _Level;
130  _Extent_at_level[1] >>= _Level;
131  _Extent_at_level[2] >>= _Level;
132  _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;
133  _Extent_at_level[1] = _Extent_at_level[1] ? _Extent_at_level[1] : 1;
134  _Extent_at_level[2] = _Extent_at_level[2] ? _Extent_at_level[2] : 1;
135 
136  return _Extent_at_level;
137 }
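Worked example: for a base extent of (800, 600), level 3 yields (800 >> 3, 600 >> 3) = (100, 75); for a base extent of (4, 4), level 5 shifts both dimensions to 0, and each is then clamped back up to 1, giving (1, 1).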
_Internal_task_options & Concurrency::details::_get_internal_task_options ( task_options &  options)
inline
1371  {
1372  return options._M_InternalTaskOptions;
1373  }
const _Internal_task_options & Concurrency::details::_get_internal_task_options ( const task_options &  options)
inline
1375  {
1376  return options._M_InternalTaskOptions;
1377  }
template<int _Rank>
unsigned int Concurrency::details::_Get_max_mipmap_levels ( const extent< _Rank > &  _Extent)
inline
1235  {
1236  unsigned int _Mipmap_levels = 0;
1237 
1238  // Find the largest dimension
1239  unsigned int _Max_dim = static_cast<unsigned int>(_Extent[0]);
1240  for(int _I=1; _I<_Rank; ++_I)
1241  {
1242  _Max_dim = static_cast<unsigned int>(_Extent[_I]) > _Max_dim ? static_cast<unsigned int>(_Extent[_I]) : _Max_dim;
1243  }
1244 
1245  // Find out how many times we can divide it by 2
1246  while(_Max_dim > 0)
1247  {
1248  _Mipmap_levels++;
1249  _Max_dim >>= 1;
1250  }
1251 
1252  return _Mipmap_levels;
1253  }
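Worked example: for an extent of (1024, 768) the largest dimension is 1024; halving 1024 until it reaches 0 takes 11 iterations (1024, 512, ..., 2, 1), so the function returns 11, the full mipmap chain length for that texture.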
unsigned int Concurrency::details::_Get_mipmap_levels ( const _Texture _Tex)
inline
4069 {
4070  _ASSERTE(_Tex);
4071  return _Tex->_Get_mip_levels();
4072 }
unsigned int _Get_mip_levels() const
Definition: amprt.h:2382
#define _ASSERTE(expr)
Definition: crtdbg.h:216
unsigned char Concurrency::details::_Get_msb ( size_t  _Mask)
inline
72 {
73  unsigned long _Index = 0;
74 
75 #if (defined (_M_IX86) || defined (_M_ARM))
76  _BitScanReverse(&_Index, _Mask);
77 #else /* (defined (_M_IX86) || defined (_M_ARM)) */
78  _BitScanReverse64(&_Index, _Mask);
79 #endif /* (defined (_M_IX86) || defined (_M_ARM)) */
80 
81  return (unsigned char) _Index;
82 }
_CRT_MANAGED_FP_DEPRECATE _In_ unsigned int _Mask
Definition: float.h:120
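For example, _Get_msb(0x1400) returns 12, because the highest set bit of 0x1400 (binary 1 0100 0000 0000) is bit 12, and _Get_msb(1) returns 0; a zero mask leaves the index produced by _BitScanReverse undefined, so callers are expected to pass a non-zero mask.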
_AMPIMP size_t __cdecl Concurrency::details::_Get_num_devices ( )
void Concurrency::details::_Get_preferred_copy_chunk_extent ( unsigned int  _Rank,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Bits_per_element,
_Out_writes_(3) size_t _Preferred_copy_chunk_extent 
)
inline
2695  {
2696  _ASSERTE(_Preferred_copy_chunk_extent != nullptr);
2697 
2698  size_t requestedByteSize = static_cast<size_t>((static_cast<unsigned long long>(_Width) *
2699  static_cast<unsigned long long>(_Height) *
2700  static_cast<unsigned long long>(_Depth) *
2701  static_cast<unsigned long long>(_Bits_per_element)) >> 3);
2702 
2703  size_t preferredChunkSize = _Get_preferred_copy_chunk_size(requestedByteSize);
2704 
2705  // Lets align the allocation size to the element size of the texture
2706  size_t preferredCopyChunkNumElems = static_cast<size_t>((static_cast<unsigned long long>(preferredChunkSize) * 8U) / _Bits_per_element);
2707 
2708  // Lets truncate the dimensions of the requested staging texture.
2709  // We only truncate in the most significant dimension
2710  switch (_Rank)
2711  {
2712  case 1:
2713  _Width = preferredCopyChunkNumElems;
2714  break;
2715  case 2:
2716  _Height = (preferredCopyChunkNumElems + _Width - 1) / _Width;
2717  break;
2718  case 3:
2719  _Depth = (preferredCopyChunkNumElems + (_Height * _Width) - 1) / (_Height * _Width);
2720  break;
2721  default:
2722  _ASSERTE(false);
2723  }
2724 
2725  _Preferred_copy_chunk_extent[0] = _Width;
2726  _Preferred_copy_chunk_extent[1] = _Height;
2727  _Preferred_copy_chunk_extent[2] = _Depth;
2728  }
#define _ASSERTE(expr)
Definition: crtdbg.h:216
_AMPIMP size_t __cdecl _Get_preferred_copy_chunk_size(size_t _Total_copy_size_in_bytes)
size_t Concurrency::details::_Get_preferred_copy_chunk_num_elems ( size_t  _Total_num_elems,
size_t  _Elem_size 
)
inline
2687  {
2688  size_t preferredChunkSize = _Get_preferred_copy_chunk_size(_Total_num_elems * _Elem_size);
2689 
2690  return (preferredChunkSize / _Elem_size);
2691  }
_AMPIMP size_t __cdecl _Get_preferred_copy_chunk_size(size_t _Total_copy_size_in_bytes)
_AMPIMP size_t __cdecl Concurrency::details::_Get_preferred_copy_chunk_size ( size_t  _Total_copy_size_in_bytes)
_AMPIMP _Access_mode __cdecl Concurrency::details::_Get_recommended_buffer_host_access_mode ( const accelerator_view &  _Av)
_AMPIMP std::pair<accelerator_view, accelerator_view> __cdecl Concurrency::details::_Get_src_dest_accelerator_view ( _In_opt_ const _Buffer_descriptor _SrcBuffDescPtr,
_In_opt_ const _Buffer_descriptor _DestBuffDescPtr 
)
_Access_mode Concurrency::details::_Get_synchronize_access_mode ( access_type  cpu_access_type)
inline
2004  {
2005  switch(cpu_access_type)
2006  {
2007  case access_type_auto:
2008  case access_type_read:
2009  return _Read_access;
2010  case access_type_write:
2011  return _Write_access;
2012  case access_type_read_write:
2013  return _Read_write_access;
2014  case access_type_none:
2015  default:
2016  _ASSERTE(false);
2017  return _No_access;
2018  }
2019  }
Definition: amprt.h:85
Definition: amprt.h:101
#define _ASSERTE(expr)
Definition: crtdbg.h:216
Definition: amprt.h:88
Definition: amprt.h:103
Definition: amprt.h:99
Definition: amprt.h:84
Definition: amprt.h:100
Definition: amprt.h:86
template<typename _Texture_type >
_Ret_ _Texture* Concurrency::details::_Get_texture ( const _Texture_type &  _Tex)
1102  {
1103  return _Tex._Get_texture();
1104  }
template<typename _Texture_type >
const _Texture_descriptor& Concurrency::details::_Get_texture_descriptor ( const _Texture_type &  _Tex)
1096  {
1097  return _Tex._M_texture_descriptor;
1098  }
_CRTIMP2 size_t __cdecl Concurrency::details::_GetCombinableSize ( )
_CRTIMP const _CONCRT_TRACE_INFO* Concurrency::details::_GetConcRTTraceInfo ( )

Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW provider.

unsigned int _CRTIMP __cdecl Concurrency::details::_GetConcurrency ( )

Returns the hardware concurrency available to the Concurrency Runtime, taking into account process affinity, or any restrictions in place because of the set_task_execution_resources method.

template<typename _Ty >
_Ty Concurrency::details::_GetTaskType ( task_completion_event< _Ty >  ,
std::false_type   
)

The _GetTaskType functions retrieve the task type T in task<T>(Arg) for a given constructor argument Arg and its "callable" property. The argument is automatically unwrapped to obtain the final return type if necessary.

template<typename _Ty >
auto Concurrency::details::_GetTaskType ( _Ty  _NonFunc,
std::false_type   
) -> decltype(_GetUnwrappedType(_NonFunc))
template<typename _Ty >
auto Concurrency::details::_GetTaskType ( _Ty  _Func,
std::true_type   
) -> decltype(_GetUnwrappedReturnType(_Func(), 0))
void Concurrency::details::_GetTaskType ( std::function< void()>  ,
std::true_type   
)
template<typename _Ty >
auto Concurrency::details::_GetUnwrappedReturnType ( _Ty  _Arg,
int   
) -> decltype(_GetUnwrappedType(_Arg))
template<typename _Ty >
_Ty Concurrency::details::_GetUnwrappedReturnType ( _Ty  ,
  ... 
)
template<typename _Ty >
_Ty Concurrency::details::_GetUnwrappedType ( task< _Ty >  )

The following type traits are used for the create_task function.

template<typename _T >
_T Concurrency::details::_Greatest_common_divisor ( _T  _M,
_T  _N 
)
inline
2733  {
2734  static_assert(std::is_unsigned<_T>::value, "This GCD function only supports unsigned integral types");
2735 
2736  _ASSERTE((_M > 0) && (_N > 0));
2737 
2738  if (_N > _M) {
2739  std::swap(_N , _M);
2740  }
2741 
2742  _T _Temp;
2743  while (_N > 0)
2744  {
2745  _Temp = _N;
2746  _N = _M % _N;
2747  _M = _Temp;
2748  }
2749 
2750  return _M;
2751  }
_N
Definition: wchar.h:1269
#define _ASSERTE(expr)
Definition: crtdbg.h:216
void swap(array< _Ty, _Size > &_Left, array< _Ty, _Size > &_Right) _NOEXCEPT_OP(_NOEXCEPT_OP(_Left.swap(_Right)))
Definition: array:429
#define _T(x)
Definition: tchar.h:2498
template<class _T >
void Concurrency::details::_InternalDeleteHelper ( _T _PObject)
287  {
288  delete _PObject;
289  }
bool Concurrency::details::_Is_cpu_accelerator ( const accelerator &  _Accl)
inline
3470  {
3471  return (_Accl.device_path == accelerator::cpu_accelerator);
3472  }
_AMPIMP bool __cdecl Concurrency::details::_Is_D3D_accelerator_view ( const accelerator_view &  _Av)
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_nonnegative ( const _T< _Rank > &  _Tuple)
static
1176  {
1177  bool valid = true;
1178  for (int i = 0; i < _Rank; ++i)
1179  {
1180  if (_Tuple[i] < 0) {
1181  valid = false;
1182  break;
1183  }
1184  }
1185 
1186  if (!valid) {
1187  throw runtime_exception("Invalid - values for each dimension must be >= 0", E_INVALIDARG);
1188  }
1189  }
int i[4]
Definition: dvec.h:70
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_positive ( const _T< _Rank > &  _Tuple)
static
1151  {
1152  bool valid = true;
1153  for (int i = 0; i < _Rank; ++i)
1154  {
1155  if (_Tuple[i] <= 0) {
1156  valid = false;
1157  break;
1158  }
1159  }
1160 
1161  if (!valid) {
1162  throw runtime_exception("Invalid - values for each dimension must be > 0", E_INVALIDARG);
1163  }
1164  }
int i[4]
Definition: dvec.h:70
bool Concurrency::details::_Is_valid_access_mode ( _Access_mode  _Mode)
inline
412  {
413  if ((_Mode != _Read_access) &&
414  (_Mode != _Write_access) &&
415  (_Mode != _Read_write_access))
416  {
417  return false;
418  }
419 
420  return true;
421  }
_Check_return_ _In_ int _Mode
Definition: io.h:338
Definition: amprt.h:85
Definition: amprt.h:88
Definition: amprt.h:86
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_valid_extent ( const _T< _Rank > &  _Tuple)
static
1204  {
1205  _Is_positive(_Tuple);
1206 
1207  bool totalSizeValid = true;
1208  unsigned long long totalSize = (unsigned long long)_Tuple[0];
1209 #pragma warning( push )
1210 #pragma warning( disable : 6294 )
1211  for (int i = 1; i < _Rank; ++i)
1212  {
1213  totalSize *= (unsigned long long)_Tuple[i];
1214  if (totalSize > UINT_MAX) {
1215  totalSizeValid = false;
1216  break;
1217  }
1218  }
1219 #pragma warning( pop )
1220 
1221  if (!totalSizeValid) {
1222  throw runtime_exception("Invalid - extent size exceeds UINT_MAX", E_INVALIDARG);
1223  }
1224  }
#define UINT_MAX
Definition: limits.h:41
int i[4]
Definition: dvec.h:70
static void _Is_positive(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1150
void Concurrency::details::_Is_valid_mipmap_range ( unsigned int  _Src_view_mipmap_levels,
unsigned int  _Dst_most_detailed_level,
unsigned int  _Dst_view_mipmap_levels 
)
inline
1276  {
1277  _Are_valid_mipmap_parameters(_Dst_most_detailed_level, _Dst_view_mipmap_levels);
1278 
1279  if (_Dst_view_mipmap_levels == 0 || _Src_view_mipmap_levels < _Dst_most_detailed_level + _Dst_view_mipmap_levels)
1280  {
1281  throw runtime_exception("Invalid texture mipmap range", E_INVALIDARG);
1282  }
1283  }
void _Are_valid_mipmap_parameters(unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
Definition: xxamp.h:1255
template<int _Rank, template< int > class _T1>
static void Concurrency::details::_Is_valid_projection ( int  _I,
const _T1< _Rank > &  _Base_extent 
)
static
1136  {
1137  if ((_I < 0) || (_I >= _Base_extent[0])) {
1138  throw runtime_exception("the specified projection index is out of bound", E_INVALIDARG);
1139  }
1140  }
template<int _Rank, template< int > class _T1, template< int > class _T2>
static void Concurrency::details::_Is_valid_section ( const _T2< _Rank > &  _Base_extent,
const _T1< _Rank > &  _Section_origin,
const _T2< _Rank > &  _Section_extent 
)
static
1111  {
1112  _Is_nonnegative(_Section_origin);
1113  _Is_positive(_Section_extent);
1114 
1115  for (int i = 0; i < _Rank; ++i)
1116  {
1117  if ((_Section_origin[i] + _Section_extent[i]) > _Base_extent[i]) {
1118  throw runtime_exception("the specified section index and extent are out of bound", E_INVALIDARG);
1119  }
1120  }
1121  }
int i[4]
Definition: dvec.h:70
static void _Is_nonnegative(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1175
static void _Is_positive(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1150
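Worked example: with a base extent of (4, 4), a section with origin (1, 1) and extent (3, 3) is accepted because origin + extent equals 4 in each dimension, while origin (2, 2) with extent (3, 3) throws runtime_exception since 2 + 3 exceeds the base extent.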
template<typename _Function >
auto Concurrency::details::_IsCallable ( _Function  _Func,
int   
) -> decltype(_Func(), std::true_type())
395 { (_Func); return std::true_type(); }
integral_constant< bool, true > true_type
Definition: xtr1common:47
template<typename _Function >
std::false_type Concurrency::details::_IsCallable ( _Function  ,
  ... 
)
396 { return std::false_type(); }
integral_constant< bool, false > false_type
Definition: xtr1common:48
template<typename _Function , typename _Type >
auto Concurrency::details::_IsTaskHelper ( _Type  t,
_Function  _Func,
int  ,
int   
) -> decltype(_Func(_To_task(t)), std::true_type())
template<typename _Function , typename _Type >
std::false_type Concurrency::details::_IsTaskHelper ( _Type  t,
_Function  _Func,
int  ,
  ... 
)
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
int  ,
int  ,
int   
) -> decltype(_Param(), std::true_type())
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
int  ,
  ... 
) -> decltype(_Param.set(stdx::declval< _ReturnType >()), std::true_type())
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
  ... 
) -> decltype(_Param.set(), std::true_type())
template<typename _ReturnType , typename _Ty >
std::false_type Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
  ... 
)
template<typename _T >
_T Concurrency::details::_Least_common_multiple ( _T  _M,
_T  _N 
)
inline
2756  {
2757  static_assert(std::is_unsigned<_T>::value, "This LCM function only supports unsigned integral types");
2758 
2759  _ASSERTE((_M > 0) && (_N > 0));
2760 
2761  _T _Gcd = _Greatest_common_divisor(_M, _N);
2762  return ((_M / _Gcd) * _N);
2763  }
_N
Definition: wchar.h:1269
_T _Greatest_common_divisor(_T _M, _T _N)
Definition: amprt.h:2732
#define _ASSERTE(expr)
Definition: crtdbg.h:216
#define _T(x)
Definition: tchar.h:2498
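Worked example: _Greatest_common_divisor(12u, 18u) first swaps so that _M = 18, then iterates (12, 6), (6, 0) and returns 6; _Least_common_multiple(12u, 18u) therefore returns (12 / 6) * 18 = 36.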
void Concurrency::details::_LogCancelTask ( _Task_impl_base *  )
inline
1544 {}
template<typename _Type >
std::function<_Unit_type(_Type)> Concurrency::details::_MakeTToUnitFunc ( const std::function< void(_Type)> &  _Func)
2595  {
2596  return [=](_Type t) -> _Unit_type { _Func(t); return _Unit_type(); };
2597  }
unsigned char _Unit_type
Definition: ppltasks.h:247
_In_ wctype_t _Type
Definition: ctype.h:205
template<typename _Type >
std::function<_Type(_Unit_type)> Concurrency::details::_MakeUnitToTFunc ( const std::function< _Type(void)> &  _Func)
2589  {
2590  return [=](_Unit_type) -> _Type { return _Func(); };
2591  }
unsigned char _Unit_type
Definition: ppltasks.h:247
_In_ wctype_t _Type
Definition: ctype.h:205
std::function<_Unit_type(_Unit_type)> Concurrency::details::_MakeUnitToUnitFunc ( const std::function< void(void)> &  _Func)
inline
2600  {
2601  return [=](_Unit_type) -> _Unit_type { _Func(); return _Unit_type(); };
2602  }
unsigned char _Unit_type
Definition: ppltasks.h:247
std::function<_Unit_type(void)> Concurrency::details::_MakeVoidToUnitFunc ( const std::function< void(void)> &  _Func)
inline
2583  {
2584  return [=]() -> _Unit_type { _Func(); return _Unit_type(); };
2585  }
unsigned char _Unit_type
Definition: ppltasks.h:247
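These adapters normalize void arguments and void results to the internal _Unit_type so the task machinery only ever deals with non-void signatures; a minimal self-contained sketch of the same idea, using a local Unit alias instead of the internal _Unit_type:

#include <functional>

using Unit = unsigned char;   // stand-in for the internal _Unit_type

// Mirrors _MakeVoidToUnitFunc: run the void functor, then return a dummy Unit.
std::function<Unit(void)> make_void_to_unit(const std::function<void(void)>& f)
{
    return [=]() -> Unit { f(); return Unit(); };
}

// Mirrors _MakeUnitToTFunc: ignore the incoming Unit and forward to the functor.
template<typename T>
std::function<T(Unit)> make_unit_to_t(const std::function<T(void)>& f)
{
    return [=](Unit) -> T { return f(); };
}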
template<int _Rank, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
extent< _Rank >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, int _Dim1, int _Dim2, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0, _Dim1, _Dim2 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, int _Dim1, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0, _Dim1 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<class _Mylist >
_Solist_const_iterator<_Mylist>& Concurrency::details::_Rechecked ( _Solist_const_iterator< _Mylist > &  _Iterator,
typename _Solist_const_iterator< _Mylist >::_Unchecked_type  _Right 
)
inline
107 {
108  return (_Iterator._Rechecked(_Right));
109 }
const _Ty & _Right
Definition: algorithm:4087
template<class _Mylist >
_Solist_iterator<_Mylist>& Concurrency::details::_Rechecked ( _Solist_iterator< _Mylist > &  _Iterator,
typename _Solist_iterator< _Mylist >::_Unchecked_type  _Right 
)
inline
{
    return (_Iterator._Rechecked(_Right));
}
_AMPIMP HRESULT __cdecl Concurrency::details::_Recursive_array_copy ( const _Array_copy_desc _Desc,
unsigned int  _Native_copy_rank,
std::function< HRESULT(const _Array_copy_desc &_Reduced)>  _Native_copy_func 
)
_AMPIMP void __cdecl Concurrency::details::_Register_async_event ( const _Event _Ev,
const std::shared_future< void > &  _Shared_future 
)
void Concurrency::details::_RegisterConcRTEventTracing ( )

Register ConcRT as an ETW Event Provider.

_CRTIMP void __cdecl Concurrency::details::_ReportUnobservedException ( )
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  t,
_Function  _Func,
int  ,
int   
) -> decltype(_Func(_To_task(t)))
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  t,
_Function  _Func,
int  ,
  ... 
) -> decltype(_Func(t))
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  t,
_Function  _Func,
  ... 
) -> _BadContinuationParamType
unsigned char Concurrency::details::_Reverse_byte ( unsigned char  _Original_byte)
inline
{
    // return ((_Original_byte * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32;
    return _Byte_reverse_table[_Original_byte];
}
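
_Reverse_byte reverses the bit order of a single byte by indexing a 256-entry lookup table; the commented-out line preserves an equivalent multiply-and-mask bit trick. A hedged sketch that generates such a table and cross-checks it against that arithmetic expression (make_reverse_table is illustrative; the real _Byte_reverse_table is a precomputed CRT constant):

#include <array>
#include <cassert>
#include <cstdint>

// Build a 256-entry bit-reversal table like the one _Reverse_byte indexes into.
std::array<unsigned char, 256> make_reverse_table()
{
    std::array<unsigned char, 256> table{};
    for (int i = 0; i < 256; ++i)
    {
        unsigned char r = 0;
        for (int bit = 0; bit < 8; ++bit)
            if (i & (1 << bit))
                r |= static_cast<unsigned char>(1 << (7 - bit));
        table[i] = r;
    }
    return table;
}

int main()
{
    const auto table = make_reverse_table();
    for (int i = 0; i < 256; ++i)
    {
        // The commented-out expression in _Reverse_byte computes the same value arithmetically.
        auto arithmetic = static_cast<unsigned char>(
            ((static_cast<uint64_t>(i) * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32);
        assert(table[i] == arithmetic);
    }
}
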
static void Concurrency::details::_ScheduleFuncWithAutoInline ( const std::function< void()> &  _Func,
_TaskInliningMode_t  _InliningMode 
)
static

Schedule a functor with automatic inlining. Note that this is "fire and forget" scheduling, which cannot be waited on or canceled after scheduling. This schedule method will perform automatic inlining based on _InliningMode.

Parameters
_Func: The user functor that needs to be scheduled.
_InliningMode: The inlining scheduling policy for the current functor.
{
    _TaskCollection_t::_RunTask(&_TaskProcThunk::_Bridge, new _TaskProcThunk(_Func), _InliningMode);
}
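
The body hands the functor to the task collection through _TaskProcThunk::_Bridge, i.e. a heap-allocated thunk that lets a std::function be driven through a plain task entry point; since scheduling is fire-and-forget, the thunk presumably owns and releases the functor copy itself. A minimal sketch of that thunk pattern, assuming nothing about the real _TaskProcThunk beyond what the call above shows (FuncThunk and Bridge are illustrative names):

#include <functional>
#include <iostream>

// Illustrative thunk in the spirit of _TaskProcThunk: owns a copy of the functor
// and exposes a static bridge callable through a plain void* task entry point.
struct FuncThunk
{
    explicit FuncThunk(std::function<void()> f) : _M_func(std::move(f)) {}

    // Shaped like a C-style task proc: invoke the stored functor, then release
    // the heap-allocated thunk ("fire and forget").
    static void Bridge(void* param)
    {
        FuncThunk* thunk = static_cast<FuncThunk*>(param);
        thunk->_M_func();
        delete thunk;
    }

    std::function<void()> _M_func;
};

int main()
{
    // A scheduler would normally call Bridge on a worker thread; invoking it
    // directly here just demonstrates the ownership hand-off.
    FuncThunk::Bridge(new FuncThunk([] { std::cout << "task body ran\n"; }));
}
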
_AMPIMP accelerator __cdecl Concurrency::details::_Select_default_accelerator ( )
_AMPIMP bool __cdecl Concurrency::details::_Set_default_accelerator ( _Accelerator_impl_ptr  _Accl)
_CRTIMP void __cdecl Concurrency::details::_SetUnobservedExceptionHandler ( _UnobservedExceptionHandler  )
template<typename _Type >
task<_Type> Concurrency::details::_To_task ( _Type  t)
task<void> Concurrency::details::_To_task ( )
template<class _Type >
__int64 Concurrency::details::_Trace_agents_get_id ( _Type _PObject)
{
    return reinterpret_cast<__int64>(_PObject);
}
template<class _Mylist >
_Solist_const_iterator<_Mylist>::_Unchecked_type Concurrency::details::_Unchecked ( _Solist_const_iterator< _Mylist >  _Iterator)
inline
{
    return (_Iterator._Unchecked());
}
template<class _Mylist >
_Solist_iterator<_Mylist>::_Unchecked_type Concurrency::details::_Unchecked ( _Solist_iterator< _Mylist >  _Iterator)
inline
{
    return (_Iterator._Unchecked());
}
void _CRTIMP __cdecl Concurrency::details::_UnderlyingYield ( )

Default method for yielding during a spin wait.

void Concurrency::details::_UnregisterConcRTEventTracing ( )

Unregister ConcRT as an ETW Event Provider.

template<typename _ReturnType , typename _Ty >
void Concurrency::details::_ValidateTaskConstructorArgs ( _Ty  _Param)
{
    static_assert(std::is_same<decltype(_IsValidTaskCtor<_ReturnType>(_Param,0,0,0,0)),std::true_type>::value,
#if defined (__cplusplus_winrt)
        "incorrect argument for task constructor; can be a callable object, an asynchronous operation, or a task_completion_event"
#else /* defined (__cplusplus_winrt) */
        "incorrect argument for task constructor; can be a callable object or a task_completion_event"
#endif /* defined (__cplusplus_winrt) */
    );
#if defined (__cplusplus_winrt)
    static_assert(!(std::is_same<_Ty,_ReturnType>::value && details::_IsIAsyncInfo<_Ty>::_Value),
        "incorrect template argument for task; consider using the return type of the async operation");
#endif /* defined (__cplusplus_winrt) */
}
template<typename _Function >
auto Concurrency::details::_VoidIsTaskHelper ( _Function  _Func,
int  ,
int   
) -> decltype(_Func(_To_task()), std::true_type())
template<typename _Function >
std::false_type Concurrency::details::_VoidIsTaskHelper ( _Function  _Func,
int  ,
  ... 
)
template<typename _Function >
auto Concurrency::details::_VoidReturnTypeHelper ( _Function  _Func,
int  ,
int   
) -> decltype(_Func(_To_task()))
template<typename _Function >
auto Concurrency::details::_VoidReturnTypeHelper ( _Function  _Func,
int  ,
  ... 
) -> decltype(_Func())
template<typename _T >
_T Concurrency::details::atomic_add ( std::atomic< _T > &  _Target,
_T  value 
)
{
    return _Target.fetch_add(value) + value;
}
template<typename _T >
_T Concurrency::details::atomic_compare_exchange ( std::atomic< _T > &  _Target,
_T  _Exchange,
_T  _Comparand 
)
{
    _T _Result = _Comparand;
    _Target.compare_exchange_strong(_Result, _Exchange);
    return _Result;
}
template<typename _T >
_T Concurrency::details::atomic_decrement ( std::atomic< _T > &  _Target)
{
    return _Target.fetch_sub(1) - 1;
}
template<typename _T >
_T Concurrency::details::atomic_exchange ( std::atomic< _T > &  _Target,
_T  _Value 
)
{
    return _Target.exchange(_Value);
}
template<typename _T >
_T Concurrency::details::atomic_increment ( std::atomic< _T > &  _Target)
{
    return _Target.fetch_add(1) + 1;
}
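
These wrappers adapt std::atomic to the return conventions the runtime expects: fetch_add and fetch_sub return the value before the operation, so atomic_add, atomic_increment, and atomic_decrement recompute and return the updated value (InterlockedIncrement-style semantics), while atomic_exchange and atomic_compare_exchange return the prior value. A small self-contained illustration of the increment/decrement convention (the function names here are illustrative, not the internal ones):

#include <atomic>
#include <cassert>

// Mirrors the wrappers above: fetch_add/fetch_sub return the *previous* value,
// so the new value is reconstructed before being returned to the caller.
template<typename T> T increment_and_fetch(std::atomic<T>& target) { return target.fetch_add(1) + 1; }
template<typename T> T decrement_and_fetch(std::atomic<T>& target) { return target.fetch_sub(1) - 1; }

int main()
{
    std::atomic<long> refs{1};
    assert(increment_and_fetch(refs) == 2);   // updated count, not the old one
    assert(decrement_and_fetch(refs) == 1);
    assert(refs.load() == 1);
}
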
template<typename _C , typename _Ty , typename _U >
bool Concurrency::details::operator!= ( const _Concurrent_queue_iterator< _C, _Ty > &  _I,
const _Concurrent_queue_iterator< _C, _U > &  _J 
)
{
    return _I._My_item!=_J._My_item;
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator!= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return !(_I==_J);
}
template<typename _Container , typename _Ty >
_Vector_iterator<_Container,_Ty> Concurrency::details::operator+ ( ptrdiff_t  _Offset,
const _Vector_iterator< _Container, _Ty > &  _Vec 
)
{
    return _Vector_iterator<_Container,_Ty>( *_Vec._My_vector, _Vec._My_index+_Offset );
}
template<typename _Container , typename _Ty , typename _U >
ptrdiff_t Concurrency::details::operator- ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return ptrdiff_t(_I._My_index)-ptrdiff_t(_J._My_index);
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator< ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return _I._My_index<_J._My_index && _I._My_vector == _J._My_vector;
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator<= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return !(_J<_I);
}
template<typename _C , typename _Ty , typename _U >
bool Concurrency::details::operator== ( const _Concurrent_queue_iterator< _C, _Ty > &  _I,
const _Concurrent_queue_iterator< _C, _U > &  _J 
)
{
    return _I._My_item==_J._My_item;
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator== ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return _I._My_index==_J._My_index && _I._My_vector == _J._My_vector;
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator> ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return _J<_I;
}
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator>= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
{
    return !(_I<_J);
}
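
The _Vector_iterator relational operators above are all derived from operator== and operator< in the canonical way: a != b is !(a == b), a > b is b < a, a <= b is !(b < a), and a >= b is !(a < b). A compact illustration of the same derivation on a toy index-based iterator (IndexIter is purely illustrative):

#include <cassert>
#include <cstddef>

// Minimal index-based iterator used only to show how the full comparison set
// falls out of operator== and operator< (the pattern used above).
struct IndexIter { std::size_t index; };

bool operator==(IndexIter a, IndexIter b) { return a.index == b.index; }
bool operator< (IndexIter a, IndexIter b) { return a.index <  b.index; }
bool operator!=(IndexIter a, IndexIter b) { return !(a == b); }
bool operator> (IndexIter a, IndexIter b) { return b < a; }
bool operator<=(IndexIter a, IndexIter b) { return !(b < a); }
bool operator>=(IndexIter a, IndexIter b) { return !(a < b); }

int main()
{
    IndexIter i{2}, j{5};
    assert(i < j && i <= j && i != j);
    assert(j > i && j >= i);
}
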

Variable Documentation

_CRTIMP2 const unsigned char Concurrency::details::_Byte_reverse_table[]
const size_t Concurrency::details::ERROR_MSG_BUFFER_SIZE = 1024
const int Concurrency::details::LOOP_UNROLL_THRESHOLD = 4
static