Concurrency::details Namespace Reference

Namespaces

 platform
 
 std
 

Classes

class  _Accelerator_view_hasher
 
class  _Aggregated_operation
 _Aggregated_operation base class More...
 
class  _Aggregator
 An aggregator for collecting operations coming from multiple sources and executing them serially on a single thread. _Operation_type must be derived from _Aggregated_operation. The parameter _Handler_type is a functor that will be passed the list of operations and is expected to handle each operation appropriately, setting the status of each operation to non-zero. More...
 
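A rough, self-contained sketch of the handler contract described above (illustrative only; MyOp and MyHandler are hypothetical stand-ins, not the actual _Aggregated_operation or _Handler_type types):

#include <vector>

// Hypothetical operation type playing the role of an _Aggregated_operation:
// each queued operation carries a status that the handler sets to a non-zero
// value once the operation has been processed.
struct MyOp
{
    int status = 0;   // 0 = pending, non-zero = handled
    int payload = 0;
};

// Hypothetical handler functor playing the role of _Handler_type: it receives
// the batch of pending operations and processes each one serially on the
// single draining thread, marking it complete by setting status to non-zero.
struct MyHandler
{
    void operator()(std::vector<MyOp*> &ops) const
    {
        for (MyOp *op : ops)
        {
            // ... perform the work described by op->payload ...
            op->status = 1;
        }
    }
};
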
class  _Allocator_base
 
struct  _AllocBase
 
class  _Amp_runtime_trace
 
class  _AnonymousOriginator
 
struct  _arithmetic_op_helper
 
struct  _arithmetic_op_helper< _T, opAdd >
 
struct  _arithmetic_op_helper< _T, opDiv >
 
struct  _arithmetic_op_helper< _T, opMod >
 
struct  _arithmetic_op_helper< _T, opMul >
 
struct  _arithmetic_op_helper< _T, opSub >
 
struct  _arithmetic_op_loop_helper
 
struct  _arithmetic_op_loop_helper< _T, _Kind, 1, false >
 
struct  _arithmetic_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _arithmetic_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _Array_copy_desc
 
struct  _Array_flatten_helper
 
struct  _Array_flatten_helper< 1, _T1, _T2 >
 
struct  _Array_flatten_helper< 2, _T1, _T2 >
 
struct  _Array_flatten_helper< 3, _T1, _T2 >
 
struct  _Array_init_helper
 
struct  _Array_init_helper< _T1, _T2, 1 >
 
struct  _Array_init_helper< _T1, _T2, 2 >
 
struct  _Array_init_helper< _T1, _T2, 3 >
 
struct  _Array_init_helper< _T1, _T2, 4 >
 
class  _Array_projection_helper
 
class  _Array_projection_helper< _T, 1 >
 
class  _Array_view_base
 
class  _Array_view_projection_helper
 
class  _Array_view_projection_helper< _T, 1 >
 
class  _Array_view_shape
 
class  _AsyncOriginator
 
class  _AsyncTaskCollection
 An async task collection is a thin wrapper over a task collection that caters to the execution of asynchronous chores (tasks defined in ppltasks.h). Specifically, it manages its own lifetime through reference counting: scheduling a chore acquires a reference, and the reference is released when the chore finishes executing. More...
 
struct  _BadArgType
 
struct  _BadContinuationParamType
 
struct  _Beacon_reference
 Internal maintenance structure for beacons. More...
 
class  _Buffer
 
struct  _Buffer_descriptor
 
class  _Cancellation_beacon
 A cancellation beacon is a flag that can be polled in an inlinable fashion using the is_signaled method, in lieu of polling the more expensive, non-inlinable is_current_task_group_canceling method. More...
 
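As a rough illustration of the polling pattern the beacon enables (a minimal sketch using a plain std::atomic flag; it does not use the internal _Cancellation_beacon type itself):

#include <atomic>

std::atomic<bool> g_cancel_hint{false};   // stands in for the beacon's cheap flag

void worker_loop()
{
    for (int i = 0; i < 1000000; ++i)
    {
        // Cheap, inlinable check on every iteration (analogous to is_signaled)...
        if (g_cancel_hint.load(std::memory_order_relaxed))
        {
            // ...and only then fall back to the expensive confirmation path
            // (analogous to is_current_task_group_canceling).
            break;
        }
        // ... do a small unit of work ...
    }
}
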
class  _CancellationTokenCallback
 
class  _CancellationTokenRegistration
 
class  _CancellationTokenState
 
struct  _Chore
 
struct  _cmp_op_helper
 
struct  _cmp_op_helper< _T, opEq >
 
struct  _cmp_op_helper< _T, opNeq >
 
struct  _cmp_op_loop_helper
 
struct  _cmp_op_loop_helper< _T, _Kind, 1, false >
 
struct  _cmp_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _cmp_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _compound_assign_op_helper
 
struct  _compound_assign_op_helper< _T, opAddEq >
 
struct  _compound_assign_op_helper< _T, opAssign >
 
struct  _compound_assign_op_helper< _T, opDivEq >
 
struct  _compound_assign_op_helper< _T, opModEq >
 
struct  _compound_assign_op_helper< _T, opMulEq >
 
struct  _compound_assign_op_helper< _T, opSubEq >
 
struct  _compound_assign_op_loop_helper
 
struct  _compound_assign_op_loop_helper< _T, _Kind, 1, false >
 
struct  _compound_assign_op_loop_helper< _T, _Kind, _Rank, false >
 
struct  _compound_assign_op_loop_helper< _T, _Kind, _Rank, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 1, false >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 2, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 3, true >
 
struct  _compound_assign_op_loop_helper< _T, opAssign, 4, true >
 
struct  _CONCRT_TRACE_INFO
 
class  _Concurrent_hash
 
class  _Concurrent_queue_base_v4
 
class  _Concurrent_queue_iterator
 
class  _Concurrent_queue_iterator_base_v4
 
class  _Concurrent_queue_rep
 
class  _Concurrent_unordered_map_traits
 
class  _Concurrent_unordered_set_traits
 
class  _Concurrent_vector_base_v4
 
class  _Const_array_projection_helper
 
class  _Const_array_projection_helper< _T, 1 >
 
class  _Const_array_view_projection_helper
 
class  _Const_array_view_projection_helper< _T, 1 >
 
struct  _contains
 
struct  _contains< _EXT, _IDX, 1 >
 
struct  _contains< _EXT, _IDX, 2 >
 
struct  _contains< _EXT, _IDX, 3 >
 
class  _Context
 
class  _ContextCallback
 
struct  _ContinuationArgTypeHelper
 
struct  _ContinuationArgTypeHelper< _Ty, std::true_type >
 
struct  _ContinuationTaskHandleBase
 
struct  _ContinuationTypeTraits
 
class  _CurrentScheduler
 
class  _D3D_interop
 
class  _DefaultPPLTaskScheduler
 
struct  _DefaultTaskHelper
 
struct  _DPC_call_handle
 
class  _Dynamic_array
 
class  _Event
 
struct  _ExceptionHolder
 
struct  _Falsifier
 
struct  _FunctionTypeTraits
 
struct  _FunctionTypeTraits< _Function, void >
 
class  _Hash_compare
 
struct  _Host_Scheduling_info
 
struct  _index_helper
 
struct  _InitFunctorTypeTraits
 
struct  _InitFunctorTypeTraits< _Ty, _Ty >
 
struct  _Internal_task_options
 
class  _Interruption_exception
 
struct  _Is_container
 
struct  _Is_extent_or_index
 
struct  _Is_extent_or_index< extent< N > >
 
struct  _Is_extent_or_index< index< N > >
 
struct  _IsIAsyncInfo
 
struct  _IsUnwrappedAsyncSelector
 
struct  _IsUnwrappedAsyncSelector< _TypeSelectorNoAsync >
 
class  _MallocaArrayHolder
 
class  _MallocaListHolder
 
struct  _map_index
 
struct  _map_index< _T1, 1 >
 
struct  _map_index< _T1, 2 >
 
struct  _map_index< _T1, 3 >
 
struct  _map_index< _T1, 4 >
 
struct  _Micro_queue
 
class  _NonReentrantBlockingLock
 
class  _NonReentrantPPLLock
 
struct  _NormalizeVoidToUnitType
 
struct  _NormalizeVoidToUnitType< void >
 
class  _Originator
 
struct  _PPLTaskHandle
 The _PPLTaskHandle is the strongly typed task handle base. All user task functions must be wrapped in this task handler to be executable by PPL. By deriving from a different _BaseTaskHandle, it can be used for both initial tasks and continuation tasks: for initial tasks, _PPLTaskHandle derives from _UnrealizedChore_t, and for continuation tasks it derives from _ContinuationTaskHandleBase. The lifetime of the _PPLTaskHandle object is managed by the runtime if the task handle is scheduled. More...
 
struct  _product_helper
 
struct  _product_helper< _T, 1, false >
 
struct  _product_helper< _T, _Rank, false >
 
struct  _product_helper< _T, _Rank, true >
 
class  _ProgressReporterCtorArgType
 
struct  _project0
 
struct  _project0< _RES_EXT, _SRC_EXT, _RES_IDX, _SRC_IDX, 2 >
 
struct  _project0< _RES_EXT, _SRC_EXT, _RES_IDX, _SRC_IDX, 3 >
 
class  _Projection_result_type
 
class  _Projection_result_type< _T, 1 >
 
class  _Queue
 
class  _ReaderWriterLock
 
class  _ReentrantBlockingLock
 
class  _ReentrantLock
 
class  _ReentrantPPLLock
 
class  _RefCounter
 
class  _RefCounterBase
 
class  _Reference_counted_obj_ptr
 
class  _Reference_counter
 
struct  _ResultHolder
 
struct  _ResultHolder< std::vector< bool > >
 
class  _Runtime_object
 
class  _Sampler
 
struct  _Sampler_descriptor
 
class  _Scheduler
 
class  _Solist_const_iterator
 
class  _Solist_iterator
 
struct  _SpinCount
 
class  _SpinLock
 
class  _SpinWait
 Implements busy wait with no backoff More...
 
class  _Split_order_list_node
 
class  _Split_order_list_value
 
class  _Split_ordered_list
 
class  _StackGuard
 RAII wrapper used to maintain and limit the maximum inline schedule depth of ppltasks. This class keeps a reference to the depth slot on the current context. More...
 
class  _StructuredTaskCollection
 Structured task collections represent groups of work which follow a strictly LIFO-ordered paradigm for queuing and waiting. They can only be waited on once and can only be used from a single thread of execution. More...
 
class  _Subatomic
 
struct  _Subatomic_impl
 
struct  _Subatomic_impl< 4 >
 
class  _SyncOriginator
 
struct  _Task_completion_event_impl
 
struct  _Task_generator_oversubscriber
 
struct  _Task_impl
 The implementation of a first-class task. This structure contains the task group used to execute the task function and handles the scheduling. The _Task_impl is created as a shared_ptr member of the public task class, so its destruction is handled automatically. More...
 
struct  _Task_impl_base
 The base implementation of a first-class task. This class contains all the non-type specific implementation details of the task. More...
 
struct  _Task_ptr
 
class  _TaskCollection
 Task collections represent groups of work which step outside the strict structuring of the _StructuredTaskCollection definition. Any group of work which does not follow LIFO ordering, is waited on multiple times, or is passed between arbitrary threads requires this definition of a task collection. It carries additional overhead over _StructuredTaskCollection. More...
 
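For orientation, the public PPL types structured_task_group and task_group are built on top of these internal collections; a minimal usage sketch of the public wrappers (not of the internal classes themselves) looks like this:

#include <ppl.h>
using namespace concurrency;

void run_grouped_work()
{
    // structured_task_group (built on _StructuredTaskCollection): strictly
    // scoped LIFO usage, waited on once, from a single thread.
    structured_task_group stg;
    auto h = make_task([] { /* work */ });
    stg.run(h);
    stg.wait();

    // task_group (built on _TaskCollection): tasks can be run and waited on
    // more freely, at some additional overhead.
    task_group tg;
    tg.run([] { /* work */ });
    tg.wait();
}
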
class  _TaskCollectionBase
 
class  _TaskCollectionBaseImpl
 
class  _TaskCreationCallstack
 Callstack container, used to capture and preserve callstacks in ppltasks. Members of this class are examined by the VC debugger, so there are no public access methods. Note that the names in this class must be kept stable for debugger examination. More...
 
struct  _TaskEventLogger
 
struct  _TaskProcHandle
 
struct  _TaskProcThunk
 Helper object used for LWT invocation. More...
 
struct  _TaskTypeFromParam
 
struct  _TaskTypeTraits
 
struct  _TaskTypeTraits< void >
 
struct  _TaskWorkItemRAIILogger
 
class  _Texture
 
struct  _Texture_descriptor
 
struct  _Texture_predefined_sample_helper
 
struct  _Texture_predefined_sample_helper< _T, 1 >
 
struct  _Texture_predefined_sample_helper< _T, 2 >
 
struct  _Texture_predefined_sample_helper< _T, 3 >
 
struct  _Texture_read_helper
 
struct  _Texture_read_helper< _T, 1 >
 
struct  _Texture_read_helper< _T, 2 >
 
struct  _Texture_read_helper< _T, 3 >
 
struct  _Texture_sample_helper
 
struct  _Texture_sample_helper< _T, 1 >
 
struct  _Texture_sample_helper< _T, 2 >
 
struct  _Texture_sample_helper< _T, 3 >
 
struct  _Texture_write_helper
 
struct  _Texture_write_helper< _T, 1 >
 
struct  _Texture_write_helper< _T, 2 >
 
struct  _Texture_write_helper< _T, 3 >
 
struct  _ThenImplOptions
 
struct  _Threadpool_chore
 
class  _Threadpool_task
 
class  _Timer
 
struct  _TypeSelectorAsyncAction
 
struct  _TypeSelectorAsyncActionWithProgress
 
struct  _TypeSelectorAsyncOperation
 
struct  _TypeSelectorAsyncOperationOrTask
 
struct  _TypeSelectorAsyncOperationWithProgress
 
struct  _TypeSelectorAsyncTask
 
struct  _TypeSelectorNoAsync
 
class  _Ubiquitous_buffer
 
class  _UnrealizedChore
 
struct  _UnwrapTaskType
 
struct  _UnwrapTaskType< task< _Ty > >
 
class  _Vector_iterator
 
class  _View_shape
 
class  CancellationTokenRegistration_TaskProc
 

Typedefs

typedef enum _Short_vector_base_type_id _Texture_base_type_id
 
typedef _Buffer_descriptor * _View_key
 
typedef struct Concurrency::details::_Buffer_descriptor _Buffer_descriptor
 
typedef struct Concurrency::details::_Texture_descriptor _Texture_descriptor
 
typedef struct Concurrency::details::_Sampler_descriptor _Sampler_descriptor
 
typedef std::unordered_set< accelerator_view, _Accelerator_view_hasher > _Accelerator_view_unordered_set
 
typedef _SpinWait _SpinWaitBackoffNone
 
typedef _SpinWait< 0 > _SpinWaitNoYield
 
typedef size_t _Ticket
 
typedef _Concurrent_queue_base_v4 _Concurrent_queue_base
 
typedef _Concurrent_queue_iterator_base_v4 concurrent_queue_iterator_base
 
typedef _Concurrent_vector_base_v4 _Concurrent_vector_base
 
typedef size_t _Map_key
 
typedef _Map_key _Split_order_key
 
typedef std::atomic< long > atomic_long
 Atomics More...
 
typedef std::atomic< size_t > atomic_size_t
 
typedef unsigned char _Unit_type
 
typedef _TaskCollection_t::_TaskProcHandle_t _UnrealizedChore_t
 
typedef std::shared_ptr< _Task_impl_base > _Task_ptr_base
 
typedef void(__cdecl * _Threadpool_callback) (void *)
 
typedef _TaskCollectionBaseImpl _TaskCollection_t
 
typedef _TaskInliningMode _TaskInliningMode_t
 
typedef _Task_generator_oversubscriber _Task_generator_oversubscriber_t
 

Enumerations

enum  _Short_vector_base_type_id : unsigned int {
  _Uint_type = 0, _Int_type = 1, _Float_type = 2, _Unorm_type = 3,
  _Norm_type = 4, _Double_type = 5, _Invalid_type = 0xFFFFFFFF
}
 
enum  _TaskCollectionStatus { _NotComplete, _Completed, _Canceled }
 
enum  _TaskInliningMode { _NoInline = 0, _DefaultAutoInline = 16, _ForceInline = -1 }
 The enum defines the inlining scheduling policy for ppltasks. Scheduling a chore or a functor with a _TaskInliningMode gives the scheduler a hint on whether or not to apply inline execution. More...
 
enum  _eInitializeState { _do_not_initialize }
 
enum  _op_kind {
  opEq, opNeq, opNot, opAssign,
  opAddEq, opSubEq, opMulEq, opDivEq,
  opModEq, opAdd, opSub, opMul,
  opDiv, opMod
}
 

Functions

template<class _Type >
__int64 _Trace_agents_get_id (_Type *_PObject)
 
template<int _Old_element_size, int _New_element_size>
int _Calculate_reinterpreted_size (int _Old_size) __GPU_ONLY
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
 
template<typename InputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (InputIterator _SrcFirst, InputIterator _SrcLast, array< _Value_type, _Rank > &_Dest)
 
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array< _Value_type, _Rank > &_Src, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< const _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
 
template<typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< const _Value_type, _Rank > &_Src, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename InputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (InputIterator _SrcFirst, InputIterator _SrcLast, const array_view< _Value_type, _Rank > &_Dest)
 
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event _Copy_async_impl (const array_view< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
 
_Ret_ _Accelerator_view_impl * _Get_accelerator_view_impl_ptr (const accelerator_view &_Accl_view)
 
_Ret_ _Accelerator_impl * _Get_accelerator_impl_ptr (const accelerator &_Accl)
 
_Event _Get_access_async (const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
 
unsigned int _Get_mipmap_levels (const _Texture *_Tex)
 
bool _Is_valid_access_mode (_Access_mode _Mode)
 
_AMPIMP size_t __cdecl _Get_num_devices ()
 
_AMPIMP _Ret_ _Accelerator_impl_ptr *__cdecl _Get_devices ()
 
_AMPIMP accelerator __cdecl _Select_default_accelerator ()
 
_AMPIMP bool __cdecl _Set_default_accelerator (_Accelerator_impl_ptr _Accl)
 
_AMPIMP bool __cdecl _Is_D3D_accelerator_view (const accelerator_view &_Av)
 
_AMPIMP void __cdecl _Register_async_event (const _Event &_Ev, const std::shared_future< void > &_Shared_future)
 
_AMPIMP _Access_mode __cdecl _Get_recommended_buffer_host_access_mode (const accelerator_view &_Av)
 
_Ret_ _View_shape * _Create_reinterpreted_shape (const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
 
_Access_mode _Get_synchronize_access_mode (access_type cpu_access_type)
 
access_type _Get_cpu_access_type (_Access_mode _Cpu_access_mode)
 
_AMPIMP _Event __cdecl _Copy_impl (_In_ _Buffer *_Src, size_t _Src_offset, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Num_elems, size_t _Preferred_copy_chunk_num_elems=0)
 
_AMPIMP _Event __cdecl _Copy_async_impl (_In_ _Texture *_Src_tex, const size_t *_Src_offset, unsigned int _Src_mipmap_level, _Out_ _Texture *_Dst_tex, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level, const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent=NULL)
 
bool _Get_chunked_staging_texture (_In_ _Texture *_Tex, const size_t *_Copy_chunk_extent, _Inout_ size_t *_Remaining_copy_extent, _Out_ size_t *_Curr_copy_extent, _Out_ _Texture_ptr *_Staging_texture)
 
template<typename _Input_iterator , typename _Value_type >
void _Copy_data_on_host_src_iter (int _Rank, _Input_iterator _Src, _Out_ _Value_type *_Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Dst_row_pitch_in_bytes, size_t _Dst_depth_pitch_in_bytes, size_t _Src_row_pitch, size_t _Src_depth_pitch)
 
template<typename _Output_iterator , typename _Value_type >
void _Copy_data_on_host_dst_iter (int _Rank, const _Value_type *_Src, _Output_iterator _Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Src_row_pitch_in_bytes, size_t _Src_depth_pitch_in_bytes, size_t _Dst_row_pitch, size_t _Dst_depth_pitch)
 
_AMPIMP size_t __cdecl _Get_preferred_copy_chunk_size (size_t _Total_copy_size_in_bytes)
 
size_t _Get_preferred_copy_chunk_num_elems (size_t _Total_num_elems, size_t _Elem_size)
 
void _Get_preferred_copy_chunk_extent (unsigned int _Rank, size_t _Width, size_t _Height, size_t _Depth, size_t _Bits_per_element, _Out_writes_(3) size_t *_Preferred_copy_chunk_extent)
 
template<typename _T >
_T _Greatest_common_divisor (_T _M, _T _N)
 
template<typename _T >
_T _Least_common_multiple (_T _M, _T _N)
 
template<typename InputIterator , typename _Value_type >
_Event _Copy_impl (InputIterator _SrcFirst, InputIterator _SrcLast, size_t _NumElemsToCopy, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Preferred_copy_chunk_num_elems=0)
 
template<typename _InputIterator , typename _Distance >
std::enable_if< std::is_base_of< std::input_iterator_tag, typename std::iterator_traits< _InputIterator >::iterator_category >::value >::type _Advance_output_iterator (_InputIterator &_Iter, _Distance _N)
 
template<typename _OutputIterator , typename _Distance >
std::enable_if<!std::is_base_of< std::input_iterator_tag, typename std::iterator_traits< _OutputIterator >::iterator_category >::value >::type _Advance_output_iterator (_OutputIterator &_Iter, size_t _N)
 
template<typename OutputIterator , typename _Value_type >
_Event _Copy_impl (_In_ _Buffer *_Src, size_t _Src_offset, size_t _Num_elems, OutputIterator _DestIter, size_t _Preferred_copy_chunk_num_elems=0)
 
_AMPIMP _Event __cdecl _Copy_impl (_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, _Out_ _Buffer *_Dst, _View_shape_ptr _Dst_shape)
 
_AMPIMP HRESULT __cdecl _Recursive_array_copy (const _Array_copy_desc &_Desc, unsigned int _Native_copy_rank, std::function< HRESULT(const _Array_copy_desc &_Reduced)> _Native_copy_func)
 
_AMPIMP std::pair< accelerator_view, accelerator_view > __cdecl _Get_src_dest_accelerator_view (_In_opt_ const _Buffer_descriptor *_SrcBuffDescPtr, _In_opt_ const _Buffer_descriptor *_DestBuffDescPtr)
 
template<typename _InputInterator , typename _OutputIterator >
_Event _Copy_impl_iter (_InputInterator _SrcFirst, _InputInterator _SrcLast, _OutputIterator _DstFirst)
 
template<typename InputIterator , typename _Value_type >
_Event _Copy_impl (InputIterator _SrcFirst, _View_shape_ptr _Src_shape, _Inout_ _Buffer *_Dst, _View_shape_ptr _Dst_shape)
 
template<typename OutputIterator , typename _Value_type >
_Event _Copy_impl (_In_ _Buffer *_Src, _View_shape_ptr _Src_shape, OutputIterator _DestIter, _View_shape_ptr _Dst_shape)
 
template<typename _InputInterator , typename _OutputIterator >
_Event _Copy_impl_iter (_InputInterator _SrcIter, _View_shape_ptr _Src_shape, _OutputIterator _DstIter, _View_shape_ptr _Dst_shape)
 
_Ret_ _View_shape * _Get_buffer_view_shape (const _Buffer_descriptor &_Descriptor)
 
bool _Is_cpu_accelerator (const accelerator &_Accl)
 
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace ()
 
template<class _Ty >
void _InternalDeleteHelper (_Ty *_PObject)
 
void _CONCRTIMP __cdecl _UnderlyingYield ()
 Default method for yielding during a spin wait More...
 
unsigned int _CONCRTIMP __cdecl _GetConcurrency ()
 Returns the hardware concurrency available to the Concurrency Runtime, taking into account process affinity, or any restrictions in place because of the set_task_execution_resources method. More...
 
_CONCRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo ()
 Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW provider. More...
 
void _RegisterConcRTEventTracing ()
 Register ConcRT as an ETW Event Provider. More...
 
void _UnregisterConcRTEventTracing ()
 Unregister ConcRT as an ETW Event Provider. More...
 
template<typename _C , typename _Ty , typename _U >
bool operator== (const _Concurrent_queue_iterator< _C, _Ty > &_I, const _Concurrent_queue_iterator< _C, _U > &_J)
 
template<typename _C , typename _Ty , typename _U >
bool operator!= (const _Concurrent_queue_iterator< _C, _Ty > &_I, const _Concurrent_queue_iterator< _C, _U > &_J)
 
template<typename _Container , typename _Ty >
_Vector_iterator< _Container, _Ty > operator+ (ptrdiff_t _Offset, const _Vector_iterator< _Container, _Ty > &_Vec)
 
template<typename _Container , typename _Ty , typename _U >
bool operator== (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator!= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator< (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator> (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator>= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
bool operator<= (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
template<typename _Container , typename _Ty , typename _U >
ptrdiff_t operator- (const _Vector_iterator< _Container, _Ty > &_I, const _Vector_iterator< _Container, _U > &_J)
 
unsigned char _Reverse_byte (unsigned char _Original_byte)
 
unsigned char _Get_msb (size_t _Mask)
 
template<class _Mylist >
_Solist_const_iterator< _Mylist >::_Unchecked_type _Unchecked (_Solist_const_iterator< _Mylist > _Iterator)
 
template<class _Mylist >
_Solist_const_iterator< _Mylist > & _Rechecked (_Solist_const_iterator< _Mylist > &_Iterator, typename _Solist_const_iterator< _Mylist >::_Unchecked_type _Right)
 
template<class _Mylist >
_Solist_iterator< _Mylist >::_Unchecked_type _Unchecked (_Solist_iterator< _Mylist > _Iterator)
 
template<class _Mylist >
_Solist_iterator< _Mylist > & _Rechecked (_Solist_iterator< _Mylist > &_Iterator, typename _Solist_iterator< _Mylist >::_Unchecked_type _Right)
 
_CONCRTIMP size_t __cdecl _GetCombinableSize ()
 
template<typename _T >
_T atomic_compare_exchange (std::atomic< _T > &_Target, _T _Exchange, _T _Comparand)
 
template<typename _T >
_T atomic_exchange (std::atomic< _T > &_Target, _T _Value)
 
template<typename _T >
_T atomic_increment (std::atomic< _T > &_Target)
 
template<typename _T >
_T atomic_decrement (std::atomic< _T > &_Target)
 
template<typename _T >
_T atomic_add (std::atomic< _T > &_Target, _T _Value)
 
template<class _Ty >
_Ty && declval ()
 
template<typename _Ty >
_TypeSelectorAsyncTask _AsyncOperationKindSelector (task< _Ty >)
 
_TypeSelectorNoAsync _AsyncOperationKindSelector (...)
 
template<typename _Function >
auto _IsCallable (_Function _Func, int) -> decltype(_Func(), std::true_type())
 
template<typename _Function >
std::false_type _IsCallable (_Function,...)
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type _Obj, _Function _Func, int, int) -> decltype(_Func(std::declval< task< _Type >>()))
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type _Obj, _Function _Func, int,...) -> decltype(_Func(_Obj))
 
template<typename _Function , typename _Type >
auto _ReturnTypeHelper (_Type _Obj, _Function _Func,...) -> _BadContinuationParamType
 
template<typename _Function , typename _Type >
auto _IsTaskHelper (_Type _Obj, _Function _Func, int, int) -> decltype(_Func(std::declval< task< _Type >>()), std::true_type())
 
template<typename _Function , typename _Type >
auto _IsTaskHelper (_Type _Obj, _Function _Func, int,...) -> std::false_type
 
template<typename _Function >
auto _VoidReturnTypeHelper (_Function _Func, int, int) -> decltype(_Func(std::declval< task< void >>()))
 
template<typename _Function >
auto _VoidReturnTypeHelper (_Function _Func, int,...) -> decltype(_Func())
 
template<typename _Function >
auto _VoidIsTaskHelper (_Function _Func, int, int) -> decltype(_Func(std::declval< task< void >>()), std::true_type())
 
template<typename _Function >
auto _VoidIsTaskHelper (_Function _Func, int,...) -> std::false_type
 
static void _ScheduleFuncWithAutoInline (const std::function< void()> &_Func, _TaskInliningMode_t _InliningMode)
 Schedule a functor with automatic inlining. Note that this is "fire and forget" scheduling, which cannot be waited on or canceled after scheduling. This schedule method will perform automatic inlining based on _InliningMode. More...
 
_Internal_task_options & _get_internal_task_options (task_options &_Options)
 
const _Internal_task_options & _get_internal_task_options (const task_options &_Options)
 
std::function< _Unit_type(void)> _MakeVoidToUnitFunc (const std::function< void(void)> &_Func)
 
template<typename _Type >
std::function< _Type(_Unit_type)> _MakeUnitToTFunc (const std::function< _Type(void)> &_Func)
 
template<typename _Type >
std::function< _Unit_type(_Type)> _MakeTToUnitFunc (const std::function< void(_Type)> &_Func)
 
std::function< _Unit_type(_Unit_type)> _MakeUnitToUnitFunc (const std::function< void(void)> &_Func)
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int, int, int, int) -> decltype(_Param(), std::true_type())
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int, int,...) -> decltype(_Param.set(details::declval< _ReturnType >()), std::true_type())
 
template<typename _ReturnType , typename _Ty >
auto _IsValidTaskCtor (_Ty _Param, int,...) -> decltype(_Param.set(), std::true_type())
 
template<typename _ReturnType , typename _Ty >
std::false_type _IsValidTaskCtor (_Ty _Param,...)
 
template<typename _ReturnType , typename _Ty >
void _ValidateTaskConstructorArgs (const _Ty &_Param)
 
template<typename _Ty >
_Ty _GetUnwrappedType (task< _Ty >)
 The following type traits are used for the create_task function. More...
 
template<typename _Ty >
auto _GetUnwrappedReturnType (_Ty _Arg, int) -> decltype(_GetUnwrappedType(_Arg))
 
template<typename _Ty >
_Ty _GetUnwrappedReturnType (_Ty,...)
 
template<typename _Ty >
_Ty _GetTaskType (task_completion_event< _Ty >, std::false_type)
 _GetTaskType functions retrieve the task type T in task<T>(Arg), for a given constructor argument Arg and its "callable" property. They automatically unwrap the argument to get the final return type if necessary. More...
 
template<typename _Ty >
auto _GetTaskType (_Ty _NonFunc, std::false_type) -> decltype(_GetUnwrappedType(_NonFunc))
 
template<typename _Ty >
auto _GetTaskType (_Ty _Func, std::true_type) -> decltype(_GetUnwrappedReturnType(_Func(), 0))
 
void _GetTaskType (std::function< void()>, std::true_type)
 
template<typename _Ty >
auto _FilterValidTaskType (_Ty _Param, int) -> decltype(_GetTaskType(_Param, _IsCallable(_Param, 0)))
 
template<typename _Ty >
_BadArgType _FilterValidTaskType (_Ty _Param,...)
 
bool _IsHRCOMDisconnected (int __hr)
 
_CRTIMP2 int __cdecl _Schedule_chore (_Threadpool_chore *)
 
_CRTIMP2 void __cdecl _Release_chore (_Threadpool_chore *)
 
_CRTIMP2 int __cdecl _Reschedule_chore (const _Threadpool_chore *)
 
inline ::std::shared_ptr< scheduler_interface > & _GetStaticAmbientSchedulerRef ()
 
_CRTIMP2 void __cdecl _ReportUnobservedException ()
 
template<class _Tuple_type >
_Tuple_type _Create_uninitialized_tuple () __GPU
 
template<typename _Array_type >
const _Buffer_descriptor & _Get_buffer_descriptor (const _Array_type &_Array) __GPU
 
template<typename _Array_type >
_Ret_ _Ubiquitous_buffer * _Get_buffer (const _Array_type &_Array) __CPU_ONLY
 
template<typename _Array_type >
_Event _Get_access_async (const _Array_type &_Array, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr) __CPU_ONLY
 
template<typename _Array_type >
void * _Get_datasource_identity (const _Array_type &_Array)
 
template<typename _Texture_type >
const _Texture_descriptor & _Get_texture_descriptor (const _Texture_type &_Tex) __GPU
 
template<typename _Texture_type >
_Ret_ _Texture * _Get_texture (const _Texture_type &_Tex) __CPU_ONLY
 
template<int _Rank, template< int > class _T1, template< int > class _T2>
static void _Is_valid_section (const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
 
template<int _Rank, template< int > class _T1>
static void _Is_valid_projection (int _I, const _T1< _Rank > &_Base_extent) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_positive (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_nonnegative (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank, template< int > class _T>
static void _Is_valid_extent (const _T< _Rank > &_Tuple) __CPU_ONLY
 
template<int _Rank>
unsigned int _Get_max_mipmap_levels (const extent< _Rank > &_Extent)
 
void _Are_valid_mipmap_parameters (unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
 
template<int _Rank>
extent< _Rank > _Get_extent_at_level_unsafe (const extent< _Rank > &_Base_extent, unsigned int _Level) __GPU
 
template<int _Rank>
extent< _Rank > _Get_extent_at_level (const extent< _Rank > &_Base_extent, unsigned int _Level)
 
void _Is_valid_mipmap_range (unsigned int _Src_view_mipmap_levels, unsigned int _Dst_most_detailed_level, unsigned int _Dst_view_mipmap_levels)
 
template<int _Rank, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, extent< _Rank > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, int _Dim1, int _Dim2, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0, _Dim1, _Dim2 > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, int _Dim1, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0, _Dim1 > _Compute_domain, const _Kernel_type &_F)
 
template<int _Dim0, typename _Kernel_type >
void _Parallel_for_each (_In_ _Host_Scheduling_info *_Sch_info, tiled_extent< _Dim0 > _Compute_domain, const _Kernel_type &_F)
 
void __dp_no_source_func () __GPU_ONLY
 
template<>
extent< 1 > _Get_extent_at_level_unsafe< 1 > (const extent< 1 > &_Base_extent, unsigned int _Level) __GPU
 
template<>
extent< 2 > _Get_extent_at_level_unsafe< 2 > (const extent< 2 > &_Base_extent, unsigned int _Level) __GPU
 
template<>
extent< 3 > _Get_extent_at_level_unsafe< 3 > (const extent< 3 > &_Base_extent, unsigned int _Level) __GPU
 

Variables

const size_t ERROR_MSG_BUFFER_SIZE = 1024
 
_CONCRTIMP const unsigned char _Byte_reverse_table []
 
static const int LOOP_UNROLL_THRESHOLD = 4
 

Typedef Documentation

typedef void(__cdecl * Concurrency::details::_Threadpool_callback) (void *)
typedef unsigned char Concurrency::details::_Unit_type
typedef std::atomic<long> Concurrency::details::atomic_long

Atomics

Enumeration Type Documentation

enum Concurrency::details::_eInitializeState

Enumerator
_do_not_initialize

{
    _do_not_initialize
};
enum Concurrency::details::_op_kind

Enumerator
opEq
opNeq
opNot
opAssign
opAddEq
opSubEq
opMulEq
opDivEq
opModEq
opAdd
opSub
opMul
opDiv
opMod

{
    // cmp op
    opEq,      // a == b
    opNeq,     // a != b
    // not op
    opNot,     // !a
    // compound assignment
    opAssign,  // a = b
    opAddEq,   // a += b
    opSubEq,   // a -= b
    opMulEq,   // a *= b
    opDivEq,   // a /= b
    opModEq,   // a %= b
    // arithmetic ops
    opAdd,     // c = a + b
    opSub,     // c = a - b
    opMul,     // c = a * b
    opDiv,     // c = a / b
    opMod,     // c = a % b
};
enum Concurrency::details::_Short_vector_base_type_id : unsigned int

Enumerator
_Uint_type
_Int_type
_Float_type
_Unorm_type
_Norm_type
_Double_type
_Invalid_type

{
    _Uint_type = 0,
    _Int_type = 1,
    _Float_type = 2,
    _Unorm_type = 3,
    _Norm_type = 4,
    _Double_type = 5,
    _Invalid_type = 0xFFFFFFFF
};
enum Concurrency::details::_TaskCollectionStatus

Enumerator
_NotComplete
_Completed
_Canceled

{
    _NotComplete,
    _Completed,
    _Canceled
};

enum Concurrency::details::_TaskInliningMode

The enum defines the inlining scheduling policy for ppltasks. Scheduling a chore or a functor with a _TaskInliningMode gives the scheduler a hint on whether or not to apply inline execution.

As an optimization, each option in the enum is assigned an integer value which effectively stands for the maximal inlining depth (threshold) for the current chore; the scheduler compares this threshold with the current context's inlining depth to make the inline decision. If the current context's inlining depth is greater than this threshold, the chore is scheduled on a new context; otherwise the chore is scheduled inline. The minimal threshold 0 means do not inline; the maximal threshold -1 (0xFFFFFFFF...) means always inline. 16 is a good default inlining threshold determined from experiment.

Enumerator
_NoInline 
_DefaultAutoInline 
_ForceInline 
{
    // Disable inline scheduling
    _NoInline = 0,
    // Let the runtime decide whether to do inline scheduling or not
    _DefaultAutoInline = 16,
    // Always do inline scheduling
    _ForceInline = -1,
};
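A minimal sketch of the decision rule described above (one plausible reading; should_inline and current_inline_depth are hypothetical stand-ins, and the exact boundary comparison is an assumption, not the runtime's code):

bool should_inline(unsigned int mode_threshold, unsigned int current_inline_depth)
{
    // mode_threshold is the _TaskInliningMode value reinterpreted as unsigned:
    // _ForceInline (-1) becomes 0xFFFFFFFF and always permits inlining,
    // _NoInline (0) never does, and _DefaultAutoInline bounds the depth at 16.
    return current_inline_depth < mode_threshold;
}
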

Function Documentation

void Concurrency::details::__dp_no_source_func ( )
inline
{
    __dp_no_source_stub();
}
template<typename _InputIterator , typename _Distance >
std::enable_if<std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_InputIterator>::iterator_category>::value>::type Concurrency::details::_Advance_output_iterator ( _InputIterator &  _Iter,
_Distance  _N 
)
{
    std::advance(_Iter, _N);
}
template<typename _OutputIterator , typename _Distance >
std::enable_if<!std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<_OutputIterator>::iterator_category>::value>::type Concurrency::details::_Advance_output_iterator ( _OutputIterator &  _Iter,
size_t  _N 
)
{
    for (size_t i = 0; i < _N; ++i)
    {
        _Iter++;
    }
}
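The two overloads above dispatch on the iterator category: iterators whose category derives from std::input_iterator_tag (which includes forward, bidirectional, and random-access iterators) advance via std::advance, while pure output iterators are stepped one element at a time. A self-contained illustration of the same enable_if dispatch technique (not the library's own code; my_advance is a hypothetical name):

#include <cstddef>
#include <iterator>
#include <type_traits>

// Readable iterators: delegate to std::advance.
template<typename _It>
typename std::enable_if<std::is_base_of<std::input_iterator_tag,
    typename std::iterator_traits<_It>::iterator_category>::value>::type
my_advance(_It &_Iter, std::ptrdiff_t _N)
{
    std::advance(_Iter, _N);
}

// Pure output iterators (e.g. std::ostream_iterator): increment step by step.
template<typename _It>
typename std::enable_if<!std::is_base_of<std::input_iterator_tag,
    typename std::iterator_traits<_It>::iterator_category>::value>::type
my_advance(_It &_Iter, std::size_t _N)
{
    while (_N--) ++_Iter;
}
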
void Concurrency::details::_Are_valid_mipmap_parameters ( unsigned int  _Most_detailed_mipmap_level,
unsigned int  _Mipmap_levels = 0 
)
inline
{
    if (_Most_detailed_mipmap_level >= 32)
    {
        throw runtime_exception("The most detailed mipmap level cannot be greater than or equal to 32", E_INVALIDARG);
    }

    if (_Mipmap_levels > 32)
    {
        throw runtime_exception("The number of mipmap levels cannot be greater than 32", E_INVALIDARG);
    }
}
template<typename _Ty >
_TypeSelectorAsyncTask Concurrency::details::_AsyncOperationKindSelector ( task< _Ty >  )
_TypeSelectorNoAsync Concurrency::details::_AsyncOperationKindSelector (   ...)
template<int _Old_element_size, int _New_element_size>
int Concurrency::details::_Calculate_reinterpreted_size ( int  _Old_size)
{
    int _Total_size = _Old_element_size * _Old_size;
    int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size;

    return _New_size;
}
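In other words, the new size is the ceiling of the total byte count divided by the new element size. A quick worked example of the same arithmetic (plain host-side code reproducing the formula, not a call to the __GPU_ONLY function itself):

// Reinterpret 10 four-byte elements as eight-byte elements:
int _Old_element_size = 4, _New_element_size = 8, _Old_size = 10;
int _Total_size = _Old_element_size * _Old_size;                            // 40 bytes
int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size;  // (40 + 7) / 8 == 5

// With 3 four-byte elements the partial trailing element still occupies a slot:
// (12 + 7) / 8 == 2
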
_AMPIMP _Event __cdecl Concurrency::details::_Copy_async_impl ( _In_ _Texture _Src_tex,
const size_t _Src_offset,
unsigned int  _Src_mipmap_level,
_Out_ _Texture _Dst_tex,
const size_t _Dst_offset,
unsigned int  _Dst_mipmap_level,
const size_t _Copy_extent,
const size_t _Preferred_copy_chunk_extent = NULL 
)
template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > &  _Src,
array< _Value_type, _Rank > &  _Dest 
)
{
    if (_Src.extent.size() > _Dest.extent.size())
    {
        throw runtime_exception("Invalid _Src argument. _Src size exceeds total size of the _Dest.", E_INVALIDARG);
    }

    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_Src.extent.size() == _Dest.extent.size()) ? _Write_access : _Read_write_access;

    _Buffer_ptr _PBufSrc, _PBufDest;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PBufSrc);
    _Ev = _Ev._Add_event(_Get_access_async(_Dest, _Dest_access_mode, _PBufDest));
    size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PBufSrc->_Get_elem_size();
    return _Ev._Add_continuation(std::function<_Event()>([_PBufSrc, _PBufDest, _NumElemsToCopy]() mutable -> _Event {
        return details::_Copy_impl(_PBufSrc, 0, _PBufDest, 0, _NumElemsToCopy);
    }));
}
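For context, this is the kind of overload the public Concurrency::copy_async for arrays is expected to funnel into; a typical call site of the public API looks roughly like this (a usage sketch, not part of this namespace):

#include <amp.h>
using namespace concurrency;

void copy_between_arrays()
{
    array<int, 1> src(1024);
    array<int, 1> dst(1024);

    // copy_async returns a completion_future that can be waited on or
    // continued; the actual data movement is driven by the _Event machinery
    // shown above.
    completion_future done = copy_async(src, dst);
    done.wait();
}
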
template<typename InputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( InputIterator  _SrcFirst,
InputIterator  _SrcLast,
array< _Value_type, _Rank > &  _Dest 
)
{
    size_t _NumElemsToCopy = std::distance(_SrcFirst, _SrcLast);
    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_NumElemsToCopy == _Dest.extent.size()) ? _Write_access : _Read_write_access;
    _Buffer_ptr _PDestBuf;
    _Event _Ev = _Get_access_async(_Dest, _Dest_access_mode, _PDestBuf);

    return _Ev._Add_continuation(std::function<_Event()>([_SrcFirst, _SrcLast, _PDestBuf, _NumElemsToCopy]() mutable -> _Event {
        return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _NumElemsToCopy, _PDestBuf, 0);
    }));
}
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > &  _Src,
OutputIterator  _DestIter 
)
{
    _Buffer_ptr _PSrcBuf;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PSrcBuf);
    size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size();
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _NumElemsToCopy, _DestIter]() mutable -> _Event {
        return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, 0, _NumElemsToCopy, _DestIter);
    }));
}
template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array< _Value_type, _Rank > &  _Src,
const array_view< _Value_type, _Rank > &  _Dest 
)
{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    if (_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_Src, _Read_access, _PSrcBuf);

    // The source accelerator_view is driven by array's master location,
    // therefore we can pass nullptr to avoid unnecessary computation
    auto _AccelInfo = _Get_src_dest_accelerator_view(nullptr, &_DestBufDesc);

    _Ev = _Ev._Add_event(_Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Write_access, _PDestBuf));
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}
template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< const _Value_type, _Rank > &  _Src,
array< _Value_type, _Rank > &  _Dest 
)
{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    if (_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, &_DestBufDesc);

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);
    _Ev = _Ev._Add_event(_Get_access_async(_Dest, _Write_access, _PDestBuf));
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}
template<typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< const _Value_type, _Rank > &  _Src,
const array_view< _Value_type, _Rank > &  _Dest 
)
{
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);
    _View_shape_ptr _PSrcShape = _Get_buffer_view_shape(_SrcBufDesc);
    _View_shape_ptr _PDestShape = _Get_buffer_view_shape(_DestBufDesc);
    if ((_SrcBufDesc._Get_buffer_ptr() == _DestBufDesc._Get_buffer_ptr()) && _PSrcShape->_Overlaps(_PDestShape)) {
        throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
    }

    auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, &_DestBufDesc);

    _Buffer_ptr _PSrcBuf, _PDestBuf;
    _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);
    _Ev = _Ev._Add_event(_Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Write_access, _PDestBuf));
    return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
        return details::_Copy_impl(_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape);
    }));
}
template<typename InputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( InputIterator  _SrcFirst,
InputIterator  _SrcLast,
const array_view< _Value_type, _Rank > &  _Dest 
)
{
    static_assert(!std::is_const<_Value_type>::value, "Cannot copy to array_view<const _Value_type, _Rank>.");

    size_t _Src_size = std::distance(_SrcFirst, _SrcLast);

    // Source cannot be greater than destination
    if (_Src_size > _Dest.extent.size())
    {
        throw runtime_exception("Number of elements in range between [_SrcFirst, _SrcLast) exceeds total size of the _Dest.", E_INVALIDARG);
    }

#pragma warning( push )
#pragma warning( disable : 4127 ) // Disable warning about constant conditional expression
    // Higher ranks need to have as many elements as in _Dest array_view
    if ((_Rank > 1) && (_Src_size != _Dest.extent.size()))
    {
        throw runtime_exception("For _Rank > 1 the number of elements in range between [_SrcFirst, _SrcLast) has to be equal to total size of the _Dest.", E_INVALIDARG);
    }
#pragma warning( pop )

    // We can obliterate the existing content of dest if it is about to be totally overwritten
    _Access_mode _Dest_access_mode = (_Src_size == _Dest.extent.size()) ? _Write_access : _Read_write_access;

    // Get read-write access for array_view on cpu_accelerator and take underlying pointer to data
    const _Buffer_descriptor &_DestBufDesc = _Get_buffer_descriptor(_Dest);

    auto _AccelInfo = _Get_src_dest_accelerator_view(nullptr, &_DestBufDesc);

    _Buffer_ptr _PDestBuf;
    _Event _Ev = _Get_access_async(_DestBufDesc._Get_view_key(), _AccelInfo.second, _Dest_access_mode, _PDestBuf);

    _View_shape_ptr _Dst_shape = _Get_buffer_view_shape(_DestBufDesc);

    // If the _Dst shape is linear then perform a linear copy
    unsigned int _Dst_linear_offset, _Dst_linear_size;
    if (_Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
    {
        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf, _SrcFirst, _SrcLast, _Src_size, _Dst_linear_offset]() mutable -> _Event {
            return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _Src_size, _PDestBuf, _Dst_linear_offset);
        }));
    }
    else
    {
        _View_shape_ptr _Reinterpreted_dst_shape = _Create_reinterpreted_shape(_Dst_shape, _PDestBuf->_Get_elem_size(), sizeof(_Value_type));

        // Source has as many elements as in destination, reshape source to match destination shape
        std::vector<unsigned int> _Src_offset(_Reinterpreted_dst_shape->_Get_rank(), 0);
        _View_shape_ptr _Src_shape = details::_View_shape::_Create_view_shape(_Reinterpreted_dst_shape->_Get_rank(), 0 /* linear offset*/,
                                                                              _Reinterpreted_dst_shape->_Get_view_extent(), _Src_offset.data(),
                                                                              _Reinterpreted_dst_shape->_Get_view_extent());

        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf, _SrcFirst, _Src_shape, _Dst_shape]() mutable -> _Event {
            return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape);
        }));
    }

    return _Ev;
}
template<typename OutputIterator , typename _Value_type , int _Rank>
_Event Concurrency::details::_Copy_async_impl ( const array_view< _Value_type, _Rank > &  _Src,
OutputIterator  _DestIter 
)
{
    // Caller is responsible for passing valid _DestIter

    // Get read access for array_view on cpu_accelerator and take underlying pointer to data
    const _Buffer_descriptor &_SrcBufDesc = _Get_buffer_descriptor(_Src);

    auto _AccelInfo = _Get_src_dest_accelerator_view(&_SrcBufDesc, nullptr);

    _Buffer_ptr _PSrcBuf;
    _Event _Ev = _Get_access_async(_SrcBufDesc._Get_view_key(), _AccelInfo.first, _Read_access, _PSrcBuf);

    // Get source shape
    _View_shape_ptr _Src_shape = _Get_buffer_view_shape(_SrcBufDesc);

    // If the _Src_shape is linear then perform a linear copy
    unsigned int _Src_linear_offset, _Src_linear_size;
    if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size))
    {
        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter]() mutable -> _Event {
            return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter);
        }));
    }
    else
    {
        _View_shape_ptr _Reinterpreted_src_shape = _Create_reinterpreted_shape(_Src_shape, _PSrcBuf->_Get_elem_size(), sizeof(_Value_type));

        // Valid destination should have space for as many elements as in source array_view, reshape to match source view shape
        std::vector<unsigned int> _Dst_offset(_Reinterpreted_src_shape->_Get_rank(), 0);
        _View_shape_ptr _Dst_shape = details::_View_shape::_Create_view_shape(_Reinterpreted_src_shape->_Get_rank(), 0 /* linear offset*/,
                                                                              _Reinterpreted_src_shape->_Get_view_extent(), _Dst_offset.data(),
                                                                              _Reinterpreted_src_shape->_Get_view_extent());

        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_shape, _DestIter, _Dst_shape]() mutable -> _Event {
            return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_shape, _DestIter, _Dst_shape);
        }));
    }

    return _Ev;
}
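For orientation, the public concurrency::copy overload that takes an array_view source and an output iterator is implemented in terms of helpers like this one. A minimal usage sketch (the variable names are illustrative, not part of the library):

    #include <amp.h>
    #include <vector>

    int main()
    {
        std::vector<int> host_src(16, 1);
        std::vector<int> host_dst(16, 0);

        // Wrap the source data in an array_view; copying it to a host iterator
        // goes through the runtime's internal copy helpers shown above.
        concurrency::array_view<const int, 1> src(16, host_src);
        concurrency::copy(src, host_dst.begin());
        return 0;
    }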
template<typename _Output_iterator , typename _Value_type >
void Concurrency::details::_Copy_data_on_host_dst_iter ( int  _Rank,
const _Value_type *  _Src,
_Output_iterator  _Dst,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Src_row_pitch_in_bytes,
size_t  _Src_depth_pitch_in_bytes,
size_t  _Dst_row_pitch,
size_t  _Dst_depth_pitch 
)
inline
{
    switch(_Rank)
    {
    case 1:
        {
            const _Value_type * _End = _Src + _Width;
            std::copy(stdext::make_unchecked_array_iterator(_Src), stdext::make_unchecked_array_iterator(_End), _Dst);
        }
        break;
    case 2:
        {
            const unsigned char *_Src_ptr = reinterpret_cast<const unsigned char *>(_Src);
            _Output_iterator _Dst_iter = _Dst;
            for (size_t _I = 0; _I < _Height; _I++)
            {
                const _Value_type * _Src_end = reinterpret_cast<const _Value_type*>(_Src_ptr) + _Width;

                std::copy(stdext::make_unchecked_array_iterator(reinterpret_cast<const _Value_type*>(_Src_ptr)), stdext::make_unchecked_array_iterator(_Src_end), _Dst_iter);
                std::advance(_Dst_iter, _Dst_row_pitch);
                _Src_ptr += _Src_row_pitch_in_bytes;
            }
        }
        break;
    case 3:
        {
            const unsigned char *_Src_ptr_slice_start = reinterpret_cast<const unsigned char *>(_Src);
            _Output_iterator _Dst_depth_slice_start = _Dst;
            for (size_t _I = 0; _I < _Depth; _I++)
            {
                _Output_iterator _Dst_iter = _Dst_depth_slice_start;
                const unsigned char *_Src_ptr = _Src_ptr_slice_start;

                for (size_t _J = 0; _J < _Height; _J++)
                {
                    const _Value_type * _Src_end = reinterpret_cast<const _Value_type *>(_Src_ptr) + _Width;

                    std::copy(stdext::make_unchecked_array_iterator(reinterpret_cast<const _Value_type*>(_Src_ptr)), stdext::make_unchecked_array_iterator(_Src_end), _Dst_iter);

                    std::advance(_Dst_iter, _Dst_row_pitch);
                    _Src_ptr += _Src_row_pitch_in_bytes;
                }

                _Src_ptr_slice_start += _Src_depth_pitch_in_bytes;
                std::advance(_Dst_depth_slice_start, _Dst_depth_pitch);
            }
        }
        break;
    default:
        _ASSERTE(FALSE);
        break;
    }
}
template<typename _Input_iterator , typename _Value_type >
void Concurrency::details::_Copy_data_on_host_src_iter ( int  _Rank,
_Input_iterator  _Src,
_Out_ _Value_type *  _Dst,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Dst_row_pitch_in_bytes,
size_t  _Dst_depth_pitch_in_bytes,
size_t  _Src_row_pitch,
size_t  _Src_depth_pitch 
)
inline
{
    switch(_Rank)
    {
    case 1:
        {
            _Input_iterator _End = _Src;
            std::advance(_End, _Width);
            std::copy(_Src, _End, stdext::make_unchecked_array_iterator(_Dst));
        }
        break;
    case 2:
        {
            unsigned char *_Dst_ptr = reinterpret_cast<unsigned char *>(_Dst);
            _Input_iterator _Src_start = _Src;
            for (size_t _I = 0; _I < _Height; _I++)
            {
                _Input_iterator _Src_end = _Src_start;
                std::advance(_Src_end, _Width);

                std::copy(_Src_start, _Src_end, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_Dst_ptr)));

                _Dst_ptr += _Dst_row_pitch_in_bytes;
                std::advance(_Src_start, _Src_row_pitch);
            }
        }
        break;
    case 3:
        {
            unsigned char *_Dst_ptr_slice_start = reinterpret_cast<unsigned char *>(_Dst);
            _Input_iterator _Src_depth_slice_start = _Src;
            for (size_t _I = 0; _I < _Depth; _I++)
            {
                _Input_iterator _Src_start = _Src_depth_slice_start;
                unsigned char *_Dst_ptr = _Dst_ptr_slice_start;

                for (size_t _J = 0; _J < _Height; _J++)
                {
                    _Input_iterator _Src_end = _Src_start;
                    std::advance(_Src_end, _Width);

                    std::copy(_Src_start, _Src_end, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_Dst_ptr)));

                    _Dst_ptr += _Dst_row_pitch_in_bytes;
                    std::advance(_Src_start, _Src_row_pitch);
                }

                _Dst_ptr_slice_start += _Dst_depth_pitch_in_bytes;
                std::advance(_Src_depth_slice_start, _Src_depth_pitch);
            }
        }
        break;
    default:
        _ASSERTE(FALSE);
        break;
    }
}
_AMPIMP _Event __cdecl Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
size_t  _Src_offset,
_Out_ _Buffer _Dst,
size_t  _Dest_offset,
size_t  _Num_elems,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
template<typename InputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( InputIterator  _SrcFirst,
InputIterator  _SrcLast,
size_t  _NumElemsToCopy,
_Out_ _Buffer _Dst,
size_t  _Dest_offset,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
inline
{
    if (_NumElemsToCopy == 0) {
        return _Event();
    }

    if (_Dst == NULL) {
        throw runtime_exception("Failed to copy to buffer.", E_INVALIDARG);
    }

#pragma warning ( push )
#pragma warning ( disable : 6001 ) // Using uninitialized memory '*_Dst'
    if (((_NumElemsToCopy * sizeof(_Value_type)) + (_Dest_offset * _Dst->_Get_elem_size())) > (_Dst->_Get_num_elems() * _Dst->_Get_elem_size()))
    {
        throw runtime_exception("Invalid _Src argument(s). _Src size exceeds total size of the _Dest.", E_INVALIDARG);
    }
#pragma warning ( pop )

    _ASSERTE(_NumElemsToCopy == (size_t)(std::distance(_SrcFirst, _SrcLast)));

    // If the dest is host accessible for write then we do the copy on
    // accelerator(accelerator::cpu_accelerator).default_view
    if (_Dst->_Is_host_accessible(_Write_access))
    {
        // Let's first map the _Dst buffer
        _Event _Ev = _Dst->_Map_buffer_async(_Write_access);

        // The _Dest is accessible on the host. We just need to do a std::copy using a raw pointer as the OutputIterator
        _Buffer_ptr _PDestBuf = _Dst;
        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PDestBuf, _Dest_offset, _SrcFirst, _SrcLast]() mutable -> _Event
        {
            _Value_type *_DestPtr = reinterpret_cast<_Value_type*>(reinterpret_cast<char*>(_PDestBuf->_Get_host_ptr()) + (_Dest_offset * _PDestBuf->_Get_elem_size()));
            std::copy(_SrcFirst, _SrcLast, stdext::make_unchecked_array_iterator(_DestPtr));

            return _Event();
        }));

        return _Ev;
    }
    else
    {
        // _Dest is on a device. Let's create a temp staging buffer on the _Dest accelerator_view and copy the input over.
        // We may create a staging buffer smaller than the copy size, in which case we will perform the copy
        // as a series of smaller copies
        _Buffer_ptr _PDestBuf = _Dst;
        size_t _NumElemsToCopyRemaining = _NumElemsToCopy;
        size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
        if (_PreferredNumElemsToCopyPerChunk == 0) {
            // If a preferred copy chunk size was not specified, let's pick one based on the
            // size of the copy
            _PreferredNumElemsToCopyPerChunk = _Get_preferred_copy_chunk_num_elems(_NumElemsToCopy, sizeof(_Value_type));
        }
        size_t _CurrDstOffset = _Dest_offset;
        InputIterator _CurrStartIter = _SrcFirst;
        _Event _Ev;

        size_t _Lcm = _Least_common_multiple(_Dst->_Get_elem_size(), sizeof(_Value_type));
        size_t _AdjustmentRatio = _Lcm / sizeof(_Value_type);

        do
        {
            size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
            if (_NumElemsToCopyRemaining < _AllocationNumElems) {
                _AllocationNumElems = _NumElemsToCopyRemaining;
            }

            _Buffer_ptr _PDestStagingBuf = _Buffer::_Get_temp_staging_buffer(_Dst->_Get_accelerator_view(),
                                                                             _AllocationNumElems, sizeof(_Value_type));

            _ASSERTE(_PDestStagingBuf != NULL);
            _ASSERTE(_PDestStagingBuf->_Get_elem_size() == sizeof(_Value_type));

            InputIterator _CurrEndIter = _CurrStartIter;
            size_t _CurrNumElemsToCopy = _AllocationNumElems;
            if (_CurrNumElemsToCopy > _PDestStagingBuf->_Get_num_elems()) {
                _CurrNumElemsToCopy = _PDestStagingBuf->_Get_num_elems();
            }

            if (_NumElemsToCopyRemaining <= _CurrNumElemsToCopy) {
                _CurrNumElemsToCopy = _NumElemsToCopyRemaining;
                _CurrEndIter = _SrcLast;
            }
            else
            {
                // We need to adjust _CurrNumElemsToCopy to be a multiple of the
                // least common multiple of the destination buffer's element size and sizeof(_Value_type).
                _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
                std::advance(_CurrEndIter, _CurrNumElemsToCopy);
            }

            _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);

            // This will not actually block since we just created this staging buffer or are using
            // a cached one that is not in use
            _PDestStagingBuf->_Map_buffer(_Write_access, true /* _Wait */);

            // Copy from the input to the staging buffer using a raw pointer as the OutputIterator
            std::copy(_CurrStartIter, _CurrEndIter, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_PDestStagingBuf->_Get_host_ptr())));

            _Ev = _Ev._Add_event(_PDestStagingBuf->_Copy_to_async(_PDestBuf, _CurrNumElemsToCopy, 0, _CurrDstOffset));

            // Adjust the iterators and offsets
            _NumElemsToCopyRemaining -= _CurrNumElemsToCopy;
            _CurrDstOffset += (_CurrNumElemsToCopy * sizeof(_Value_type)) / _Dst->_Get_elem_size();
            _CurrStartIter = _CurrEndIter;

        } while (_NumElemsToCopyRemaining != 0);

        return _Ev;
    }
}
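The _AdjustmentRatio logic above keeps every intermediate chunk an integral number of both destination elements and _Value_type elements. A short standalone sketch of that arithmetic; the element sizes are made-up illustration values, not taken from any particular buffer:

    #include <cstddef>
    #include <iostream>
    #include <numeric>   // std::lcm (C++17)

    int main()
    {
        const std::size_t dst_elem_size   = 12;  // e.g. a 3-float destination element (assumed)
        const std::size_t value_type_size = 8;   // e.g. copying doubles (assumed)

        // Same role as _Least_common_multiple / _AdjustmentRatio in the copy loop.
        const std::size_t lcm   = std::lcm(dst_elem_size, value_type_size);   // 24 bytes
        const std::size_t ratio = lcm / value_type_size;                      // 3 source elements

        // A tentative chunk of 1000 elements gets rounded down to a multiple of the ratio,
        // so each chunk is also a whole number of destination elements.
        std::size_t chunk = 1000;
        chunk = (chunk / ratio) * ratio;                                      // 999
        std::cout << "adjusted chunk: " << chunk << " elements ("
                  << chunk * value_type_size / dst_elem_size << " destination elements)\n";
        return 0;
    }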
template<typename OutputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
size_t  _Src_offset,
size_t  _Num_elems,
OutputIterator  _DestIter,
size_t  _Preferred_copy_chunk_num_elems = 0 
)
inline
{
    if ((_Src == NULL) || ((_Src_offset + _Num_elems) > _Src->_Get_num_elems())) {
        throw runtime_exception("Failed to copy to buffer.", E_INVALIDARG);
    }

    if (_Num_elems == 0) {
        return _Event();
    }

    size_t _NumElemsToCopy = (_Num_elems * _Src->_Get_elem_size()) / sizeof(_Value_type);

    // If the src is host accessible for read then we do the copy on
    // accelerator(accelerator::cpu_accelerator).default_view
    if (_Src->_Is_host_accessible(_Read_access))
    {
        // Map the _Src buffer
        _Event _Ev = _Src->_Map_buffer_async(_Read_access);

        // The _Src is accessible on the host. We just need to do a std::copy using a raw pointer as the InputIterator
        _Buffer_ptr _PSrcBuf = _Src;
        _Ev = _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_offset, _DestIter, _NumElemsToCopy]() mutable -> _Event
        {
            const _Value_type *_PFirst = reinterpret_cast<const _Value_type*>(reinterpret_cast<char*>(_PSrcBuf->_Get_host_ptr()) + (_Src_offset * _PSrcBuf->_Get_elem_size()));
            std::copy(_PFirst, _PFirst + _NumElemsToCopy, _DestIter);

            return _Event();
        }));

        return _Ev;
    }
    else
    {
        // The _Src is on the device. We need to copy it out to a temporary staging array.
        // We may create a staging buffer smaller than the copy size, in which case we will
        // perform the copy as a series of smaller copies

        _Event _Ev;

        _Buffer_ptr _PSrcBuf = _Src;
        size_t _PreferredNumElemsToCopyPerChunk = _Preferred_copy_chunk_num_elems;
        if (_PreferredNumElemsToCopyPerChunk == 0) {
            // If a preferred copy chunk size was not specified, let's pick one based on the
            // size of the copy
            _PreferredNumElemsToCopyPerChunk = _Get_preferred_copy_chunk_num_elems(_NumElemsToCopy, sizeof(_Value_type));
        }

        size_t _AllocationNumElems = _PreferredNumElemsToCopyPerChunk;
        if (_NumElemsToCopy < _AllocationNumElems) {
            _AllocationNumElems = _NumElemsToCopy;
        }

        _Buffer_ptr _PSrcStagingBuf = _Buffer::_Get_temp_staging_buffer(_Src->_Get_accelerator_view(),
                                                                        _AllocationNumElems, sizeof(_Value_type));

        _ASSERTE(_PSrcStagingBuf != NULL);
        _ASSERTE(_PSrcStagingBuf->_Get_elem_size() == sizeof(_Value_type));

        // The total byte size of a copy chunk must be an integral multiple of both the
        // source buffer's element size and sizeof(_Value_type).
        size_t _Lcm = _Least_common_multiple(_Src->_Get_elem_size(), sizeof(_Value_type));
        size_t _AdjustmentRatio = _Lcm / sizeof(_Value_type);

        size_t _CurrNumElemsToCopy = _AllocationNumElems;
        if (_CurrNumElemsToCopy > _PSrcStagingBuf->_Get_num_elems()) {
            _CurrNumElemsToCopy = _PSrcStagingBuf->_Get_num_elems();
        }
        if (_NumElemsToCopy <= _CurrNumElemsToCopy)
        {
            _CurrNumElemsToCopy = _NumElemsToCopy;
        }
        else
        {
            // We need to adjust _CurrNumElemsToCopy to be a multiple of the
            // least common multiple of the source buffer's element size and sizeof(_Value_type).
            _CurrNumElemsToCopy = (_CurrNumElemsToCopy / _AdjustmentRatio) * _AdjustmentRatio;
        }

        _ASSERTE((_CurrNumElemsToCopy % _AdjustmentRatio) == 0);

        size_t _NumElemsToCopyRemaining = _NumElemsToCopy - _CurrNumElemsToCopy;

        _Ev = _PSrcBuf->_Copy_to_async(_PSrcStagingBuf, (_CurrNumElemsToCopy * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(), _Src_offset, 0);

        if (_NumElemsToCopyRemaining != 0)
        {
            _Ev = _Ev._Add_continuation(std::function<_Event()>([_DestIter, _PSrcBuf, _PSrcStagingBuf,
                                                                 _CurrNumElemsToCopy, _NumElemsToCopyRemaining,
                                                                 _Src_offset, _PreferredNumElemsToCopyPerChunk]() mutable -> _Event
            {
                // Initiate an asynchronous copy of the remaining part so that it
                // makes progress while we complete the copying of the first part
                size_t _CurrSrcOffset = _Src_offset + ((_CurrNumElemsToCopy * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size());
                OutputIterator _CurrDestIter = _DestIter;
                _Advance_output_iterator<decltype(_CurrDestIter), size_t>(_CurrDestIter, _CurrNumElemsToCopy);
                _Event _Ret_ev = _Copy_impl<OutputIterator, _Value_type>(_PSrcBuf._Get_ptr(), _CurrSrcOffset,
                                                                         (_NumElemsToCopyRemaining * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size(),
                                                                         _CurrDestIter, _PreferredNumElemsToCopyPerChunk);

                // Now copy the data from the staging buffer to the destination
                _Value_type *_PFirst = reinterpret_cast<_Value_type*>(_PSrcStagingBuf->_Get_host_ptr());
                std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
                return _Ret_ev;
            }));
        }
        else
        {
            _Ev = _Ev._Add_continuation(std::function<_Event()>([_DestIter, _PSrcStagingBuf, _CurrNumElemsToCopy]() mutable -> _Event
            {
                _Value_type *_PFirst = reinterpret_cast<_Value_type*>(_PSrcStagingBuf->_Get_host_ptr());
                std::copy(_PFirst, _PFirst + _CurrNumElemsToCopy, _DestIter);
                return _Event();
            }));
        }

        return _Ev;
    }
}
_AMPIMP _Event __cdecl Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
_View_shape_ptr  _Src_shape,
_Out_ _Buffer _Dst,
_View_shape_ptr  _Dst_shape 
)
template<typename InputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( InputIterator  _SrcFirst,
_View_shape_ptr  _Src_shape,
_Inout_ _Buffer _Dst,
_View_shape_ptr  _Dst_shape 
)
inline
{
    _ASSERTE(_Dst != NULL);
    _ASSERTE(_Src_shape != NULL);
    _ASSERTE(_Dst_shape != NULL);

    if (_Src_shape->_Is_projection()) {
        _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
    }

    if (_Dst_shape->_Is_projection()) {
        _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
    }

    _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());

    _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
                                                         sizeof(_Value_type), _Dst_shape->_Get_view_extent(), _Dst->_Get_elem_size()));

    if (_Dst->_Is_host_accessible(_Write_access))
    {
        // The destination buffer is accessible on the host. Map the _Dst buffer
        _Event _Ev = _Dst->_Map_buffer_async(_Write_access);
        _Buffer_ptr _PDestBuf = _Dst;
        return _Ev._Add_continuation(std::function<_Event()>([_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape]() mutable -> _Event {
            return _Copy_impl_iter(_SrcFirst, _Src_shape, stdext::make_unchecked_array_iterator(reinterpret_cast<_Value_type*>(_PDestBuf->_Get_host_ptr())),
                                   _Create_reinterpreted_shape(_Dst_shape, _PDestBuf->_Get_elem_size(), sizeof(_Value_type)));
        }));
    }
    else
    {
        // The dest buffer is not accessible on the host. Let's create a temporary
        // staging buffer on the destination buffer's accelerator_view
        _Buffer_ptr _PTempStagingBuf = _Buffer::_Create_stage_buffer(_Dst->_Get_accelerator_view(), accelerator(accelerator::cpu_accelerator).default_view,
                                                                     _Src_shape->_Get_view_size(), sizeof(_Value_type), true /* _Is_temp */);

        _PTempStagingBuf->_Map_buffer(_Write_access, true /* _Wait */);
        _Value_type *_Dst_ptr = reinterpret_cast<_Value_type*>(_PTempStagingBuf->_Get_host_ptr());
        _Event _Ev = _Copy_impl_iter(_SrcFirst, _Src_shape, stdext::make_unchecked_array_iterator(_Dst_ptr), _Src_shape);

        // Now copy from the staging buffer to the destination buffer
        _Buffer_ptr _PDestBuf = _Dst;
        return _Ev._Add_continuation(std::function<_Event()>([_PTempStagingBuf, _Src_shape, _PDestBuf, _Dst_shape]() mutable -> _Event {
            return _Copy_impl(_PTempStagingBuf, _Src_shape, _PDestBuf, _Dst_shape);
        }));
    }
}
template<typename OutputIterator , typename _Value_type >
_Event Concurrency::details::_Copy_impl ( _In_ _Buffer _Src,
_View_shape_ptr  _Src_shape,
OutputIterator  _DestIter,
_View_shape_ptr  _Dst_shape 
)
inline
{
    _ASSERTE(_Src != NULL);
    _ASSERTE(_Src_shape != NULL);
    _ASSERTE(_Dst_shape != NULL);

    if (_Src_shape->_Is_projection()) {
        _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
    }

    if (_Dst_shape->_Is_projection()) {
        _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
    }

    _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());

    _ASSERTE(_View_shape::_Compare_extent_with_elem_size(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(),
                                                         _Src->_Get_elem_size(), _Dst_shape->_Get_view_extent(), sizeof(_Value_type)));

    if (_Src->_Is_host_accessible(_Read_access))
    {
        // The source buffer is accessible on the host. Map the _Src buffer
        _Event _Ev = _Src->_Map_buffer_async(_Read_access);

        _Buffer_ptr _PSrcBuf = _Src;
        return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _Src_shape, _DestIter, _Dst_shape]() mutable -> _Event {
            return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PSrcBuf->_Get_host_ptr()),
                                   _Create_reinterpreted_shape(_Src_shape, _PSrcBuf->_Get_elem_size(), sizeof(_Value_type)),
                                   _DestIter, _Dst_shape);
        }));
    }
    else
    {
        // The source buffer is not accessible on the host. Let's create a temporary
        // staging buffer on the source buffer's accelerator_view and initiate a copy
        // from the source buffer to the temporary staging buffer
        _Buffer_ptr _PTempStagingBuf = _Buffer::_Create_stage_buffer(_Src->_Get_accelerator_view(), accelerator(accelerator::cpu_accelerator).default_view,
                                                                     _Dst_shape->_Get_view_size(), sizeof(_Value_type), true);

        _Event _Ev = _Src->_Copy_to_async(_PTempStagingBuf, _Src_shape, _Dst_shape);
        return _Ev._Add_continuation(std::function<_Event()>([_PTempStagingBuf, _Dst_shape, _DestIter]() mutable -> _Event {
            return _Copy_impl_iter(reinterpret_cast<_Value_type*>(_PTempStagingBuf->_Get_host_ptr()),
                                   _Dst_shape, _DestIter, _Dst_shape);
        }));
    }
}
template<typename _InputInterator , typename _OutputIterator >
_Event Concurrency::details::_Copy_impl_iter ( _InputInterator  _SrcFirst,
_InputInterator  _SrcLast,
_OutputIterator  _DstFirst 
)
inline
{
    std::copy(_SrcFirst, _SrcLast, _DstFirst);
    return _Event();
}
template<typename _InputInterator , typename _OutputIterator >
_Event Concurrency::details::_Copy_impl_iter ( _InputInterator  _SrcIter,
_View_shape_ptr  _Src_shape,
_OutputIterator  _DstIter,
_View_shape_ptr  _Dst_shape 
)
inline
{
    if (_Src_shape->_Is_projection()) {
        _Src_shape = _Src_shape->_Get_reduced_shape_for_copy();
    }

    if (_Dst_shape->_Is_projection()) {
        _Dst_shape = _Dst_shape->_Get_reduced_shape_for_copy();
    }

    _ASSERTE(_Src_shape->_Get_rank() == _Dst_shape->_Get_rank());
    _ASSERTE(_View_shape::_Compare_extent(_Src_shape->_Get_rank(), _Src_shape->_Get_view_extent(), _Dst_shape->_Get_view_extent()));

    // If both the _Src_shape and _Dst_shape are linear we can be more efficient
    unsigned int _Src_linear_offset, _Src_linear_size, _Dst_linear_offset, _Dst_linear_size;
    if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size) &&
        _Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
    {
        _ASSERTE(_Src_linear_size == _Dst_linear_size);

        // These iterators might not be contiguous, therefore we use std::advance
        std::advance(_SrcIter, _Src_linear_offset);
        auto _SrcLast = _SrcIter;
        std::advance(_SrcLast, _Src_linear_size);
        std::advance(_DstIter, _Dst_linear_offset);

        return _Copy_impl_iter(_SrcIter, _SrcLast, _DstIter);
    }

    std::vector<unsigned int> _Src_extent(_Src_shape->_Get_rank());
    std::vector<unsigned int> _Src_offset(_Src_shape->_Get_rank());
    std::vector<unsigned int> _Dst_extent(_Dst_shape->_Get_rank());
    std::vector<unsigned int> _Dst_offset(_Dst_shape->_Get_rank());
    std::vector<unsigned int> _Copy_extent(_Src_shape->_Get_rank());

    for (size_t i = 0; i < _Src_shape->_Get_rank(); ++i) {
        _Src_extent[i] = _Src_shape->_Get_base_extent()[i];
        _Src_offset[i] = _Src_shape->_Get_view_offset()[i];
        _Dst_extent[i] = _Dst_shape->_Get_base_extent()[i];
        _Dst_offset[i] = _Dst_shape->_Get_view_offset()[i];
        _Copy_extent[i] = _Src_shape->_Get_view_extent()[i];
    }

    _Array_copy_desc _Desc(
        _Src_shape->_Get_rank(),
        _Src_shape->_Get_linear_offset(),
        _Src_extent.data(),
        _Src_offset.data(),
        _Dst_shape->_Get_linear_offset(),
        _Dst_extent.data(),
        _Dst_offset.data(),
        _Copy_extent.data());

    // Note: Capturing the shape pointers would be incorrect; they are only valid while setting up the call
    // and might be deleted right after this call completes.
    HRESULT hr = _Recursive_array_copy(_Desc, 1, [_SrcIter, _DstIter](const _Array_copy_desc &_Reduced) -> HRESULT {

        auto _SrcFirst = _SrcIter;
        auto _DstFirst = _DstIter;

        std::advance(_DstFirst, _Reduced._Dst_linear_offset + _Reduced._Dst_copy_offset[0]);
        std::advance(_SrcFirst, _Reduced._Src_linear_offset + _Reduced._Src_copy_offset[0]);
        auto _SrcLast = _SrcFirst;
        std::advance(_SrcLast, _Reduced._Copy_extents[0]);

        std::copy(_SrcFirst, _SrcLast, _DstFirst);

        return S_OK;
    });

    if (FAILED(hr)) {
        throw Concurrency::runtime_exception("Failed to copy between buffers", E_FAIL);
    }

    return _Event();
}
_Ret_ _View_shape* Concurrency::details::_Create_reinterpreted_shape ( const _View_shape _Source_shape,
size_t  _Curr_elem_size,
size_t  _New_elem_size 
)
inline
{
    unsigned int _Rank = _Source_shape->_Get_rank();
    size_t _LinearOffsetInBytes = _Source_shape->_Get_linear_offset() * _Curr_elem_size;
    size_t _BaseLSDExtentInBytes = (_Source_shape->_Get_base_extent())[_Rank - 1] * _Curr_elem_size;
    size_t _ViewLSDOffsetInBytes = (_Source_shape->_Get_view_offset())[_Rank - 1] * _Curr_elem_size;
    size_t _ViewLSDExtentInBytes = (_Source_shape->_Get_view_extent())[_Rank - 1] * _Curr_elem_size;

    _ASSERTE((_LinearOffsetInBytes % _New_elem_size) == 0);
    _ASSERTE((_BaseLSDExtentInBytes % _New_elem_size) == 0);
    _ASSERTE((_ViewLSDOffsetInBytes % _New_elem_size) == 0);
    _ASSERTE((_ViewLSDExtentInBytes % _New_elem_size) == 0);

    size_t _Temp_val = _LinearOffsetInBytes / _New_elem_size;
    _ASSERTE(_Temp_val <= UINT_MAX);
    unsigned int _New_linear_offset = static_cast<unsigned int>(_Temp_val);

    std::vector<unsigned int> _New_base_extent(_Rank);
    std::vector<unsigned int> _New_view_offset(_Rank);
    std::vector<unsigned int> _New_view_extent(_Rank);
    for (unsigned int i = 0; i < _Rank - 1; ++i) {
        _New_base_extent[i] = (_Source_shape->_Get_base_extent())[i];
        _New_view_offset[i] = (_Source_shape->_Get_view_offset())[i];
        _New_view_extent[i] = (_Source_shape->_Get_view_extent())[i];
    }

    // The extent in the least significant dimension needs to be adjusted
    _Temp_val = _BaseLSDExtentInBytes / _New_elem_size;
    _ASSERTE(_Temp_val <= UINT_MAX);
    _New_base_extent[_Rank - 1] = static_cast<unsigned int>(_Temp_val);

    _Temp_val = _ViewLSDOffsetInBytes / _New_elem_size;
    _ASSERTE(_Temp_val <= UINT_MAX);
    _New_view_offset[_Rank - 1] = static_cast<unsigned int>(_Temp_val);

    _Temp_val = _ViewLSDExtentInBytes / _New_elem_size;
    _ASSERTE(_Temp_val <= UINT_MAX);
    _New_view_extent[_Rank - 1] = static_cast<unsigned int>(_Temp_val);

    return _View_shape::_Create_view_shape(_Rank, _New_linear_offset, _New_base_extent.data(), _New_view_offset.data(), _New_view_extent.data());
}
template<class _Tuple_type >
_Tuple_type Concurrency::details::_Create_uninitialized_tuple ( )
{
    return _Tuple_type(details::_do_not_initialize);
}
template<typename _Ty >
auto Concurrency::details::_FilterValidTaskType ( _Ty  _Param,
int   
) -> decltype(_GetTaskType(_Param, _IsCallable(_Param, 0)))
template<typename _Ty >
_BadArgType Concurrency::details::_FilterValidTaskType ( _Ty  _Param,
  ... 
)
_Ret_ _Accelerator_impl * Concurrency::details::_Get_accelerator_impl_ptr ( const accelerator &  _Accl)
inline
{
    return _Accl._M_impl;
}
_Ret_ _Accelerator_view_impl * Concurrency::details::_Get_accelerator_view_impl_ptr ( const accelerator_view &  _Accl_view)
inline
{
    return _Accl_view._M_impl;
}
_Event Concurrency::details::_Get_access_async ( const _View_key  _Key,
accelerator_view  _Av,
_Access_mode  _Mode,
_Buffer_ptr &  _Buf_ptr 
)
inline
{
    return _Key->_Get_buffer_ptr()->_Get_access_async(_Key->_Get_view_key(), _Av, _Mode, _Buf_ptr);
}
template<typename _Array_type >
_Event Concurrency::details::_Get_access_async ( const _Array_type &  _Array,
_Access_mode  _Mode,
_Buffer_ptr &  _Buf_ptr 
)
{
    return _Array._Get_access_async(_Mode, _Buf_ptr);
}
_AMPIMP _Ret_ _Amp_runtime_trace* __cdecl Concurrency::details::_Get_amp_trace ( )
template<typename _Array_type >
_Ret_ _Ubiquitous_buffer* Concurrency::details::_Get_buffer ( const _Array_type &  _Array)
{
    return _Array._Get_buffer();
}
template<typename _Array_type >
const _Buffer_descriptor& Concurrency::details::_Get_buffer_descriptor ( const _Array_type &  _Array)
{
    return _Array._M_buffer_descriptor;
}
_Ret_ _View_shape* Concurrency::details::_Get_buffer_view_shape ( const _Buffer_descriptor _Descriptor)
inline
{
    return _Descriptor._Get_buffer_ptr()->_Get_view_shape(_Descriptor._Get_view_key());
}
bool Concurrency::details::_Get_chunked_staging_texture ( _In_ _Texture _Tex,
const size_t _Copy_chunk_extent,
_Inout_ size_t _Remaining_copy_extent,
_Out_ size_t _Curr_copy_extent,
_Out_ _Texture_ptr *  _Staging_texture 
)
inline
{
    bool _Truncated_copy = false;
    size_t _Allocation_extent[3] = { _Copy_chunk_extent[0], _Copy_chunk_extent[1], _Copy_chunk_extent[2] };

    unsigned int _Most_sig_idx = _Tex->_Get_rank() - 1;

    if (_Allocation_extent[_Most_sig_idx] > _Remaining_copy_extent[_Most_sig_idx]) {
        _Allocation_extent[_Most_sig_idx] = _Remaining_copy_extent[_Most_sig_idx];
    }

    _Texture_ptr _Stage = _Texture::_Get_temp_staging_texture(_Tex->_Get_accelerator_view(), _Tex->_Get_rank(),
                                                              _Allocation_extent[0], _Allocation_extent[1], _Allocation_extent[2],
                                                              /*_Mip_levels=*/1, _Tex->_Get_texture_format());

    std::copy(&_Allocation_extent[0], &_Allocation_extent[3], stdext::make_unchecked_array_iterator(&_Curr_copy_extent[0]));
    size_t _Staging_tex_extent[3] = {_Stage->_Get_width(), _Stage->_Get_height(), _Stage->_Get_depth()};
    if (_Curr_copy_extent[_Most_sig_idx] > _Staging_tex_extent[_Most_sig_idx]) {
        _Curr_copy_extent[_Most_sig_idx] = _Staging_tex_extent[_Most_sig_idx];
    }

    // Truncation, however, can happen only in the most significant dimension;
    // lower dimensions should not get truncated
    if (_Curr_copy_extent[_Most_sig_idx] < _Remaining_copy_extent[_Most_sig_idx])
    {
        _Remaining_copy_extent[_Most_sig_idx] -= _Curr_copy_extent[_Most_sig_idx];
        _Truncated_copy = true;
    }

    for (unsigned int _I = 0; _I < _Most_sig_idx; _I++)
    {
        _ASSERTE(_Curr_copy_extent[_I] == _Remaining_copy_extent[_I]);
    }

    *_Staging_texture = _Stage;
    return _Truncated_copy;
}
access_type Concurrency::details::_Get_cpu_access_type ( _Access_mode  _Cpu_access_mode)
inline
{
    access_type _Cpu_access_type = access_type_none;
    if (_Cpu_access_mode & _Read_access) {
        _Cpu_access_type = static_cast<access_type>(_Cpu_access_type | access_type_read);
    }

    if (_Cpu_access_mode & _Write_access) {
        _Cpu_access_type = static_cast<access_type>(_Cpu_access_type | access_type_write);
    }

    return _Cpu_access_type;
}
template<typename _Array_type >
void* Concurrency::details::_Get_datasource_identity ( const _Array_type &  _Array)
{
    return _Get_buffer_descriptor(_Array)._Get_buffer_ptr()._Get_ptr();
}
_AMPIMP _Ret_ _Accelerator_impl_ptr* __cdecl Concurrency::details::_Get_devices ( )
template<int _Rank>
extent< _Rank > Concurrency::details::_Get_extent_at_level ( const extent< _Rank > &  _Base_extent,
unsigned int  _Level 
)
inline
{
    _Are_valid_mipmap_parameters(_Level);
    return _Get_extent_at_level_unsafe(_Base_extent, _Level);
}
template<int _Rank>
extent< _Rank > Concurrency::details::_Get_extent_at_level_unsafe ( const extent< _Rank > &  _Base_extent,
unsigned int  _Level 
)
inline
{
    static_assert(_Rank >= 3, "_Rank >= 3");
}
template<>
extent<1> Concurrency::details::_Get_extent_at_level_unsafe< 1 > ( const extent< 1 > &  _Base_extent,
unsigned int  _Level 
)
inline
{
    extent<1> _Extent_at_level(_Base_extent);

    _Extent_at_level[0] >>= _Level;
    _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;

    return _Extent_at_level;
}
template<>
extent<2> Concurrency::details::_Get_extent_at_level_unsafe< 2 > ( const extent< 2 > &  _Base_extent,
unsigned int  _Level 
)
inline
{
    extent<2> _Extent_at_level(_Base_extent);

    _Extent_at_level[0] >>= _Level;
    _Extent_at_level[1] >>= _Level;
    _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;
    _Extent_at_level[1] = _Extent_at_level[1] ? _Extent_at_level[1] : 1;

    return _Extent_at_level;
}
template<>
extent<3> Concurrency::details::_Get_extent_at_level_unsafe< 3 > ( const extent< 3 > &  _Base_extent,
unsigned int  _Level 
)
inline
{
    extent<3> _Extent_at_level(_Base_extent);

    _Extent_at_level[0] >>= _Level;
    _Extent_at_level[1] >>= _Level;
    _Extent_at_level[2] >>= _Level;
    _Extent_at_level[0] = _Extent_at_level[0] ? _Extent_at_level[0] : 1;
    _Extent_at_level[1] = _Extent_at_level[1] ? _Extent_at_level[1] : 1;
    _Extent_at_level[2] = _Extent_at_level[2] ? _Extent_at_level[2] : 1;

    return _Extent_at_level;
}
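Each mip level halves every dimension (rounding down) and clamps it to a minimum of 1. A standalone sketch of the same shift-and-clamp computation with illustrative numbers:

    #include <cassert>

    int main()
    {
        // A rank-3 extent of {64, 16, 1} viewed at mip level 3.
        unsigned int e[3] = { 64, 16, 1 };
        const unsigned int level = 3;

        for (int d = 0; d < 3; ++d)
        {
            e[d] >>= level;             // halve three times
            if (e[d] == 0) e[d] = 1;    // clamp, exactly like the specializations above
        }

        assert(e[0] == 8 && e[1] == 2 && e[2] == 1);
        return 0;
    }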
_Internal_task_options & Concurrency::details::_get_internal_task_options ( task_options &  _Options)
inline
{
    return _Options._M_InternalTaskOptions;
}
const _Internal_task_options & Concurrency::details::_get_internal_task_options ( const task_options &  _Options)
inline
{
    return _Options._M_InternalTaskOptions;
}
template<int _Rank>
unsigned int Concurrency::details::_Get_max_mipmap_levels ( const extent< _Rank > &  _Extent)
inline
{
    unsigned int _Mipmap_levels = 0;

    // Find the largest dimension
    unsigned int _Max_dim = static_cast<unsigned int>(_Extent[0]);
    for(int _I=1; _I<_Rank; ++_I)
    {
        _Max_dim = static_cast<unsigned int>(_Extent[_I]) > _Max_dim ? static_cast<unsigned int>(_Extent[_I]) : _Max_dim;
    }

    // Find out how many times we can divide it by 2
    while(_Max_dim > 0)
    {
        _Mipmap_levels++;
        _Max_dim >>= 1;
    }

    return _Mipmap_levels;
}
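In other words, the number of levels is the bit width of the largest dimension. A quick sketch with an illustrative extent:

    #include <cassert>

    int main()
    {
        // Largest dimension of an assumed 200 x 31 extent.
        unsigned int max_dim = 200;         // binary 11001000 -> 8 significant bits
        unsigned int levels = 0;
        while (max_dim > 0) { ++levels; max_dim >>= 1; }
        assert(levels == 8);                // mip chain: 200, 100, 50, 25, 12, 6, 3, 1
        return 0;
    }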
unsigned int Concurrency::details::_Get_mipmap_levels ( const _Texture _Tex)
inline
{
    _ASSERTE(_Tex);
    return _Tex->_Get_mip_levels();
}
unsigned char Concurrency::details::_Get_msb ( size_t  _Mask)
inline
{
    unsigned long _Index = 0;

#if (defined (_M_IX86) || defined (_M_ARM))
    _BitScanReverse(&_Index, _Mask);
#else /* (defined (_M_IX86) || defined (_M_ARM)) */
    _BitScanReverse64(&_Index, _Mask);
#endif /* (defined (_M_IX86) || defined (_M_ARM)) */

    return (unsigned char) _Index;
}
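_BitScanReverse returns the zero-based index of the highest set bit, so for example _Get_msb(0x50) yields 6. A portable stand-in for the same computation (a sketch, not the intrinsic itself; the intrinsic's behavior is undefined for a zero mask):

    #include <cassert>
    #include <cstddef>

    // Portable stand-in for the _BitScanReverse-based helper above.
    unsigned char msb_index(std::size_t mask)
    {
        unsigned char index = 0;
        while (mask >>= 1) ++index;   // count how far the top bit is from bit 0
        return index;
    }

    int main()
    {
        assert(msb_index(0x50) == 6);   // 0b0101'0000 -> highest set bit is bit 6
        assert(msb_index(1) == 0);
        return 0;
    }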
_AMPIMP size_t __cdecl Concurrency::details::_Get_num_devices ( )
void Concurrency::details::_Get_preferred_copy_chunk_extent ( unsigned int  _Rank,
size_t  _Width,
size_t  _Height,
size_t  _Depth,
size_t  _Bits_per_element,
_Out_writes_(3) size_t *  _Preferred_copy_chunk_extent 
)
inline
{
    _ASSERTE(_Preferred_copy_chunk_extent != nullptr);

    size_t requestedByteSize = static_cast<size_t>((static_cast<unsigned long long>(_Width) *
                                                    static_cast<unsigned long long>(_Height) *
                                                    static_cast<unsigned long long>(_Depth) *
                                                    static_cast<unsigned long long>(_Bits_per_element)) >> 3);

    size_t preferredChunkSize = _Get_preferred_copy_chunk_size(requestedByteSize);

    // Let's align the allocation size to the element size of the texture
    size_t preferredCopyChunkNumElems = static_cast<size_t>((static_cast<unsigned long long>(preferredChunkSize) * 8U) / _Bits_per_element);

    // Let's truncate the dimensions of the requested staging texture.
    // We only truncate in the most significant dimension
    switch (_Rank)
    {
    case 1:
        _Width = preferredCopyChunkNumElems;
        break;
    case 2:
        _Height = (preferredCopyChunkNumElems + _Width - 1) / _Width;
        break;
    case 3:
        _Depth = (preferredCopyChunkNumElems + (_Height * _Width) - 1) / (_Height * _Width);
        break;
    default:
        _ASSERTE(false);
    }

    _Preferred_copy_chunk_extent[0] = _Width;
    _Preferred_copy_chunk_extent[1] = _Height;
    _Preferred_copy_chunk_extent[2] = _Depth;
}
size_t Concurrency::details::_Get_preferred_copy_chunk_num_elems ( size_t  _Total_num_elems,
size_t  _Elem_size 
)
inline
{
    size_t preferredChunkSize = _Get_preferred_copy_chunk_size(_Total_num_elems * _Elem_size);

    return (preferredChunkSize / _Elem_size);
}
_AMPIMP size_t __cdecl Concurrency::details::_Get_preferred_copy_chunk_size ( size_t  _Total_copy_size_in_bytes)
_AMPIMP _Access_mode __cdecl Concurrency::details::_Get_recommended_buffer_host_access_mode ( const accelerator_view &  _Av)
_AMPIMP std::pair<accelerator_view, accelerator_view> __cdecl Concurrency::details::_Get_src_dest_accelerator_view ( _In_opt_ const _Buffer_descriptor _SrcBuffDescPtr,
_In_opt_ const _Buffer_descriptor _DestBuffDescPtr 
)
_Access_mode Concurrency::details::_Get_synchronize_access_mode ( access_type  cpu_access_type)
inline
{
    switch(cpu_access_type)
    {
    case access_type_auto:
    case access_type_read:
        return _Read_access;
    case access_type_write:
        return _Write_access;
    case access_type_read_write:
        return _Read_write_access;
    case access_type_none:
    default:
        _ASSERTE(false);
        return _No_access;
    }
}
template<typename _Texture_type >
_Ret_ _Texture* Concurrency::details::_Get_texture ( const _Texture_type &  _Tex)
{
    return _Tex._Get_texture();
}
template<typename _Texture_type >
const _Texture_descriptor& Concurrency::details::_Get_texture_descriptor ( const _Texture_type &  _Tex)
{
    return _Tex._M_texture_descriptor;
}
_CONCRTIMP size_t __cdecl Concurrency::details::_GetCombinableSize ( )
_CONCRTIMP const _CONCRT_TRACE_INFO* Concurrency::details::_GetConcRTTraceInfo ( )

Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW provider.

unsigned int _CONCRTIMP __cdecl Concurrency::details::_GetConcurrency ( )

Returns the hardware concurrency available to the Concurrency Runtime, taking into account process affinity, or any restrictions in place because of the set_task_execution_resources method.

inline ::std::shared_ptr<scheduler_interface>& Concurrency::details::_GetStaticAmbientSchedulerRef ( )
{
    static ::std::shared_ptr<scheduler_interface> _S_scheduler;
    return _S_scheduler;
}
template<typename _Ty >
_Ty Concurrency::details::_GetTaskType ( task_completion_event< _Ty >  ,
std::false_type   
)

The _GetTaskType functions retrieve the task type T in task<T>(Arg) for a given constructor argument Arg and its "callable" property. They automatically unwrap the argument to get the final return type if necessary.

template<typename _Ty >
auto Concurrency::details::_GetTaskType ( _Ty  _NonFunc,
std::false_type   
) -> decltype(_GetUnwrappedType(_NonFunc))
template<typename _Ty >
auto Concurrency::details::_GetTaskType ( _Ty  _Func,
std::true_type   
) -> decltype(_GetUnwrappedReturnType(_Func(), 0))
void Concurrency::details::_GetTaskType ( std::function< void()>  ,
std::true_type   
)
template<typename _Ty >
auto Concurrency::details::_GetUnwrappedReturnType ( _Ty  _Arg,
int   
) -> decltype(_GetUnwrappedType(_Arg))
template<typename _Ty >
_Ty Concurrency::details::_GetUnwrappedReturnType ( _Ty  ,
  ... 
)
template<typename _Ty >
_Ty Concurrency::details::_GetUnwrappedType ( task< _Ty >  )

The following type traits are used for the create_task function.

template<typename _T >
_T Concurrency::details::_Greatest_common_divisor ( _T  _M,
_T  _N 
)
inline
{
    static_assert(std::is_unsigned<_T>::value, "This GCD function only supports unsigned integral types");

    _ASSERTE((_M > 0) && (_N > 0));

    if (_N > _M) {
        std::swap(_N, _M);
    }

    _T _Temp;
    while (_N > 0)
    {
        _Temp = _N;
        _N = _M % _N;
        _M = _Temp;
    }

    return _M;
}
template<class _Ty >
void Concurrency::details::_InternalDeleteHelper ( _Ty *  _PObject)
{
    delete _PObject;
}
bool Concurrency::details::_Is_cpu_accelerator ( const accelerator &  _Accl)
inline
{
    return (_Accl.device_path == accelerator::cpu_accelerator);
}
_AMPIMP bool __cdecl Concurrency::details::_Is_D3D_accelerator_view ( const accelerator_view &  _Av)
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_nonnegative ( const _T< _Rank > &  _Tuple)
static
{
    bool valid = true;
    for (int i = 0; i < _Rank; ++i)
    {
        if (_Tuple[i] < 0) {
            valid = false;
            break;
        }
    }

    if (!valid) {
        throw runtime_exception("Invalid - values for each dimension must be >= 0", E_INVALIDARG);
    }
}
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_positive ( const _T< _Rank > &  _Tuple)
static
{
    bool valid = true;
    for (int i = 0; i < _Rank; ++i)
    {
        if (_Tuple[i] <= 0) {
            valid = false;
            break;
        }
    }

    if (!valid) {
        throw runtime_exception("Invalid - values for each dimension must be > 0", E_INVALIDARG);
    }
}
bool Concurrency::details::_Is_valid_access_mode ( _Access_mode  _Mode)
inline
{
    if ((_Mode != _Read_access) &&
        (_Mode != _Write_access) &&
        (_Mode != _Read_write_access))
    {
        return false;
    }

    return true;
}
template<int _Rank, template< int > class _T>
static void Concurrency::details::_Is_valid_extent ( const _T< _Rank > &  _Tuple)
static
{
    _Is_positive(_Tuple);

    bool totalSizeValid = true;
    unsigned long long totalSize = (unsigned long long)_Tuple[0];
#pragma warning( push )
#pragma warning( disable : 6294 )
    for (int i = 1; i < _Rank; ++i)
    {
        totalSize *= (unsigned long long)_Tuple[i];
        if (totalSize > UINT_MAX) {
            totalSizeValid = false;
            break;
        }
    }
#pragma warning( pop )

    if (!totalSizeValid) {
        throw runtime_exception("Invalid - extent size exceeds UINT_MAX", E_INVALIDARG);
    }
}
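The check guards against the product of the dimensions overflowing the unsigned int range used for linear sizes. A small sketch of the same validation with illustrative values:

    #include <cassert>
    #include <climits>

    int main()
    {
        // An assumed rank-3 extent of {65536, 65536, 2}: each dimension is positive,
        // but the total element count is 2^33, which exceeds UINT_MAX.
        const int extent[3] = { 65536, 65536, 2 };

        unsigned long long total = 1;
        bool valid = true;
        for (int i = 0; i < 3; ++i)
        {
            total *= static_cast<unsigned long long>(extent[i]);
            if (total > UINT_MAX) { valid = false; break; }
        }
        assert(!valid);   // the runtime would throw runtime_exception(E_INVALIDARG) here
        return 0;
    }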
void Concurrency::details::_Is_valid_mipmap_range ( unsigned int  _Src_view_mipmap_levels,
unsigned int  _Dst_most_detailed_level,
unsigned int  _Dst_view_mipmap_levels 
)
inline
{
    _Are_valid_mipmap_parameters(_Dst_most_detailed_level, _Dst_view_mipmap_levels);

    if (_Dst_view_mipmap_levels == 0 || _Src_view_mipmap_levels < _Dst_most_detailed_level + _Dst_view_mipmap_levels)
    {
        throw runtime_exception("Invalid texture mipmap range", E_INVALIDARG);
    }
}
template<int _Rank, template< int > class _T1>
static void Concurrency::details::_Is_valid_projection ( int  _I,
const _T1< _Rank > &  _Base_extent 
)
static
{
    if ((_I < 0) || (_I >= _Base_extent[0])) {
        throw runtime_exception("the specified projection index is out of bound", E_INVALIDARG);
    }
}
template<int _Rank, template< int > class _T1, template< int > class _T2>
static void Concurrency::details::_Is_valid_section ( const _T2< _Rank > &  _Base_extent,
const _T1< _Rank > &  _Section_origin,
const _T2< _Rank > &  _Section_extent 
)
static
{
    _Is_nonnegative(_Section_origin);
    _Is_positive(_Section_extent);

    for (int i = 0; i < _Rank; ++i)
    {
        if ((_Section_origin[i] + _Section_extent[i]) > _Base_extent[i]) {
            throw runtime_exception("the specified section index and extent are out of bound", E_INVALIDARG);
        }
    }
}
template<typename _Function >
auto Concurrency::details::_IsCallable ( _Function  _Func,
int   
) -> decltype(_Func(), std::true_type())
template<typename _Function >
std::false_type Concurrency::details::_IsCallable ( _Function  ,
  ... 
)
bool Concurrency::details::_IsHRCOMDisconnected ( int  __hr)
inline
{
    return __hr == 0x800706BA // HRESULT_FROM_WIN32(RPC_S_SERVER_UNAVAILABLE)
        || __hr == 0x80010108 // RPC_E_DISCONNECTED
        || __hr == 0x89020001; // JSCRIPT_E_CANTEXECUTE
}
template<typename _Function , typename _Type >
auto Concurrency::details::_IsTaskHelper ( _Type  _Obj,
_Function  _Func,
int  ,
int   
) -> decltype(_Func(std::declval< task< _Type >>()), std::true_type())
template<typename _Function , typename _Type >
auto Concurrency::details::_IsTaskHelper ( _Type  _Obj,
_Function  _Func,
int  ,
  ... 
) -> std::false_type
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
int  ,
int  ,
int   
) -> decltype(_Param(), std::true_type())
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
int  ,
  ... 
) -> decltype(_Param.set(details::declval< _ReturnType >()), std::true_type())
template<typename _ReturnType , typename _Ty >
auto Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
int  ,
  ... 
) -> decltype(_Param.set(), std::true_type())
template<typename _ReturnType , typename _Ty >
std::false_type Concurrency::details::_IsValidTaskCtor ( _Ty  _Param,
  ... 
)
template<typename _T >
_T Concurrency::details::_Least_common_multiple ( _T  _M,
_T  _N 
)
inline
{
    static_assert(std::is_unsigned<_T>::value, "This LCM function only supports unsigned integral types");

    _ASSERTE((_M > 0) && (_N > 0));

    _T _Gcd = _Greatest_common_divisor(_M, _N);
    return ((_M / _Gcd) * _N);
}
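These two helpers drive the chunk-size adjustment in the staged copies above: a copy chunk's byte size must be a multiple of both element sizes involved. A short sketch of the identity they rely on, with illustrative sizes:

    #include <cassert>
    #include <numeric>   // std::gcd / std::lcm (C++17)

    int main()
    {
        const unsigned src_elem_size   = 12;  // assumed source element size in bytes
        const unsigned value_type_size = 8;   // assumed sizeof(_Value_type)

        // lcm(m, n) == (m / gcd(m, n)) * n, which is exactly how _Least_common_multiple is written.
        assert(std::gcd(src_elem_size, value_type_size) == 4u);
        assert(std::lcm(src_elem_size, value_type_size) == 24u);
        assert((src_elem_size / std::gcd(src_elem_size, value_type_size)) * value_type_size == 24u);
        return 0;
    }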
template<typename _Type >
std::function<_Unit_type(_Type)> Concurrency::details::_MakeTToUnitFunc ( const std::function< void(_Type)> &  _Func)
{
    return [=](_Type _Obj) -> _Unit_type { _Func(_Obj); return _Unit_type(); };
}
template<typename _Type >
std::function<_Type(_Unit_type)> Concurrency::details::_MakeUnitToTFunc ( const std::function< _Type(void)> &  _Func)
{
    return [=](_Unit_type) -> _Type { return _Func(); };
}
std::function<_Unit_type(_Unit_type)> Concurrency::details::_MakeUnitToUnitFunc ( const std::function< void(void)> &  _Func)
inline
{
    return [=](_Unit_type) -> _Unit_type { _Func(); return _Unit_type(); };
}
std::function<_Unit_type(void)> Concurrency::details::_MakeVoidToUnitFunc ( const std::function< void(void)> &  _Func)
inline
{
    return [=]() -> _Unit_type { _Func(); return _Unit_type(); };
}
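These adapters let the PPL treat void-returning user callables uniformly: internally a task over void is handled as a task over a one-byte unit type, and the user's functor is wrapped so it returns that unit value. A minimal standalone sketch of the idea; the local Unit alias stands in for the internal _Unit_type:

    #include <functional>
    #include <iostream>

    using Unit = unsigned char;   // stand-in for the internal _Unit_type

    // Analogous to _MakeVoidToUnitFunc: adapt a void() callable to a Unit() callable.
    std::function<Unit()> make_void_to_unit(const std::function<void()> &func)
    {
        return [=]() -> Unit { func(); return Unit(); };
    }

    int main()
    {
        auto wrapped = make_void_to_unit([] { std::cout << "side effect\n"; });
        Unit u = wrapped();       // the unit value itself carries no information
        (void)u;
        return 0;
    }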
template<int _Rank, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
extent< _Rank >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, int _Dim1, int _Dim2, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0, _Dim1, _Dim2 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, int _Dim1, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0, _Dim1 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<int _Dim0, typename _Kernel_type >
void Concurrency::details::_Parallel_for_each ( _In_ _Host_Scheduling_info *  _Sch_info,
tiled_extent< _Dim0 >  _Compute_domain,
const _Kernel_type &  _F 
)
template<class _Mylist >
_Solist_const_iterator<_Mylist>& Concurrency::details::_Rechecked ( _Solist_const_iterator< _Mylist > &  _Iterator,
typename _Solist_const_iterator< _Mylist >::_Unchecked_type  _Right 
)
inline
{
    return (_Iterator._Rechecked(_Right));
}
template<class _Mylist >
_Solist_iterator<_Mylist>& Concurrency::details::_Rechecked ( _Solist_iterator< _Mylist > &  _Iterator,
typename _Solist_iterator< _Mylist >::_Unchecked_type  _Right 
)
inline
{
    return (_Iterator._Rechecked(_Right));
}
_AMPIMP HRESULT __cdecl Concurrency::details::_Recursive_array_copy ( const _Array_copy_desc _Desc,
unsigned int  _Native_copy_rank,
std::function< HRESULT(const _Array_copy_desc &_Reduced)>  _Native_copy_func 
)
_AMPIMP void __cdecl Concurrency::details::_Register_async_event ( const _Event _Ev,
const std::shared_future< void > &  _Shared_future 
)
void Concurrency::details::_RegisterConcRTEventTracing ( )

Register ConcRT as an ETW Event Provider.

_CRTIMP2 void __cdecl Concurrency::details::_Release_chore ( _Threadpool_chore *  )
_CRTIMP2 void __cdecl Concurrency::details::_ReportUnobservedException ( )
_CRTIMP2 int __cdecl Concurrency::details::_Reschedule_chore ( const _Threadpool_chore *  )
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  _Obj,
_Function  _Func,
int  ,
int   
) -> decltype(_Func(std::declval< task< _Type >>()))
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  _Obj,
_Function  _Func,
int  ,
  ... 
) -> decltype(_Func(_Obj))
template<typename _Function , typename _Type >
auto Concurrency::details::_ReturnTypeHelper ( _Type  _Obj,
_Function  _Func,
  ... 
) -> _BadContinuationParamType
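These three overloads rank each other with the usual int/ellipsis trick: a candidate whose trailing decltype is well-formed and whose int parameters exactly match the literal 0 arguments wins, and the ... overload is the always-viable fallback (here yielding _BadContinuationParamType). A minimal standalone sketch of the same detection pattern with a single ranking parameter (the trait name and the detected expression are illustrative):

#include <type_traits>
#include <utility>

// Preferred overload: participates only if f() is a well-formed expression.
template <typename F>
auto callable_with_no_args_helper(F f, int) -> decltype(f(), std::true_type());

// Fallback: chosen when the decltype above fails to compile (SFINAE).
template <typename F>
auto callable_with_no_args_helper(F, ...) -> std::false_type;

template <typename F>
using callable_with_no_args = decltype(callable_with_no_args_helper(std::declval<F>(), 0));

static_assert(callable_with_no_args<void (*)()>::value, "nullary function pointers are callable with no arguments");
static_assert(!callable_with_no_args<void (*)(int)>::value, "unary function pointers are not");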
unsigned char Concurrency::details::_Reverse_byte ( unsigned char  _Original_byte)
inline
{
  // return ((_Original_byte * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32;
  return _Byte_reverse_table[_Original_byte];
}
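The commented-out expression in the body is the classic multiply-and-mask byte reversal; the shipped code favors a 256-entry lookup table instead. A small self-contained check that the arithmetic form agrees with a plain bit loop (purely illustrative):

#include <cassert>
#include <cstdint>

// Reverse the bits of a byte using the 64-bit multiply-and-mask trick.
inline unsigned char reverse_byte_mul(unsigned char b)
{
    return static_cast<unsigned char>(((b * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32);
}

// Reference implementation: shift the bits out one at a time.
inline unsigned char reverse_byte_loop(unsigned char b)
{
    unsigned char r = 0;
    for (int i = 0; i < 8; ++i)
        r = static_cast<unsigned char>((r << 1) | ((b >> i) & 1));
    return r;
}

int main()
{
    for (int v = 0; v < 256; ++v)
        assert(reverse_byte_mul(static_cast<unsigned char>(v)) == reverse_byte_loop(static_cast<unsigned char>(v)));
    return 0;
}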
_CRTIMP2 int __cdecl Concurrency::details::_Schedule_chore ( _Threadpool_chore *  )
static void Concurrency::details::_ScheduleFuncWithAutoInline ( const std::function< void()> &  _Func,
_TaskInliningMode_t  _InliningMode 
)
static

Schedule a functor with automatic inlining. Note that this is "fire and forget" scheduling, which cannot be waited on or canceled after scheduling. This schedule method performs automatic inlining based on _InliningMode.

Parameters
_Func The user functor to be scheduled.
_InliningMode The inlining scheduling policy for the current functor.
  {
  _TaskCollection_t::_RunTask(&_TaskProcThunk::_Bridge, new _TaskProcThunk(_Func), _InliningMode);
  }
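The body forwards the functor through _TaskProcThunk::_Bridge, a static entry point that unwraps a heap-allocated copy of the user functor and destroys it after running, which is what makes the call fire-and-forget. A minimal sketch of that thunk/bridge shape on top of std::thread (the scheduling call and the absence of an inlining policy are stand-ins, not ConcRT's mechanism):

#include <functional>
#include <thread>

// Heap-allocated thunk that owns a copy of the user functor, mirroring the _TaskProcThunk idea.
struct FuncThunk
{
    explicit FuncThunk(std::function<void()> f) : _fn(std::move(f)) {}

    // Static bridge: a C-style entry point a scheduler can invoke with a raw pointer.
    static void bridge(void* p)
    {
        FuncThunk* self = static_cast<FuncThunk*>(p);
        self->_fn();
        delete self;          // fire and forget: the thunk cleans itself up
    }

    std::function<void()> _fn;
};

// Stand-in for scheduling on a thread pool; detaching makes the work non-waitable by design.
inline void schedule_fire_and_forget(const std::function<void()>& fn)
{
    FuncThunk* thunk = new FuncThunk(fn);
    std::thread(&FuncThunk::bridge, thunk).detach();
}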
_AMPIMP accelerator __cdecl Concurrency::details::_Select_default_accelerator ( )
_AMPIMP bool __cdecl Concurrency::details::_Set_default_accelerator ( _Accelerator_impl_ptr  _Accl)
template<class _Type >
__int64 Concurrency::details::_Trace_agents_get_id ( _Type *  _PObject)
  {
  return reinterpret_cast<__int64>(_PObject);
  }
template<class _Mylist >
_Solist_const_iterator<_Mylist>::_Unchecked_type Concurrency::details::_Unchecked ( _Solist_const_iterator< _Mylist >  _Iterator)
inline
{
  return (_Iterator._Unchecked());
}
template<class _Mylist >
_Solist_iterator<_Mylist>::_Unchecked_type Concurrency::details::_Unchecked ( _Solist_iterator< _Mylist >  _Iterator)
inline
{
  return (_Iterator._Unchecked());
}
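_Unchecked and _Rechecked implement the usual checked-iterator round trip: strip the debugging payload to get a raw iterator that can be advanced cheaply, then graft the raw result back onto the checked wrapper. An illustrative sketch of that round trip with a hand-rolled wrapper (the type and its members are hypothetical, not the split-ordered-list iterators documented here):

#include <cassert>
#include <vector>

// Illustrative checked iterator: pairs a raw pointer with the container it came from.
struct CheckedIt
{
    int* _ptr;
    const std::vector<int>* _owner;        // checking payload a debug iterator would validate against

    using _Unchecked_type = int*;

    _Unchecked_type _Unchecked() const { return _ptr; }   // drop the checking payload

    CheckedIt& _Rechecked(_Unchecked_type raw)            // re-attach it to a raw result
    {
        _ptr = raw;
        return *this;
    }
};

int main()
{
    std::vector<int> v{1, 2, 3, 4};
    CheckedIt it{v.data(), &v};

    // Pattern used by the helpers above: strip checks, advance cheaply, then recheck.
    int* raw = it._Unchecked();
    raw += 2;
    it._Rechecked(raw);

    assert(*it._ptr == 3);
    return 0;
}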
void _CONCRTIMP __cdecl Concurrency::details::_UnderlyingYield ( )

Default method for yielding during a spin wait.

void Concurrency::details::_UnregisterConcRTEventTracing ( )

Unregister ConcRT as an ETW Event Provider.

template<typename _ReturnType , typename _Ty >
void Concurrency::details::_ValidateTaskConstructorArgs ( const _Ty &  _Param)
  {
  static_assert(std::is_same<decltype(_IsValidTaskCtor<_ReturnType, _Ty>(_Param,0,0,0,0)),std::true_type>::value,
#if defined (__cplusplus_winrt)
  "incorrect argument for task constructor; must be a callable object, an asynchronous operation or a task_completion_event"
#else /* defined (__cplusplus_winrt) */
  "incorrect argument for task constructor; must be either a callable object or a task_completion_event"
#endif /* defined (__cplusplus_winrt) */
  );
#if defined (__cplusplus_winrt)
  static_assert(!(std::is_same<_Ty,_ReturnType>::value && details::_IsIAsyncInfo<_Ty>::_Value),
  "incorrect template argument for task; consider using the return type of the async operation");
#endif /* defined (__cplusplus_winrt) */
  }
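For orientation, the kinds of arguments this assertion accepts or rejects can be seen at the public task level (the rejected line is left commented out because it fails to compile by design):

#include <ppltasks.h>
using namespace concurrency;

void task_constructor_examples()
{
    task<int> from_lambda([] { return 1; });     // callable object: accepted

    task_completion_event<int> tce;
    task<int> from_event(tce);                   // task_completion_event: accepted

    // task<int> from_literal(5);                // neither callable nor an event:
    //                                           // the first static_assert above fires
}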
template<typename _Function >
auto Concurrency::details::_VoidIsTaskHelper ( _Function  _Func,
int  ,
int   
) -> decltype(_Func(std::declval< task< void >>()), std::true_type())
template<typename _Function >
auto Concurrency::details::_VoidIsTaskHelper ( _Function  _Func,
int  ,
  ... 
) -> std::false_type
template<typename _Function >
auto Concurrency::details::_VoidReturnTypeHelper ( _Function  _Func,
int  ,
int   
) -> decltype(_Func(std::declval< task< void >>()))
template<typename _Function >
auto Concurrency::details::_VoidReturnTypeHelper ( _Function  _Func,
int  ,
  ... 
) -> decltype(_Func())
template<typename _T >
_T Concurrency::details::atomic_add ( std::atomic< _T > &  _Target,
_T  _Value 
)
{
  return _Target.fetch_add(_Value) + _Value;
}
template<typename _T >
_T Concurrency::details::atomic_compare_exchange ( std::atomic< _T > &  _Target,
_T  _Exchange,
_T  _Comparand 
)
{
  _T _Result = _Comparand;
  _Target.compare_exchange_strong(_Result, _Exchange);
  return _Result;
}
template<typename _T >
_T Concurrency::details::atomic_decrement ( std::atomic< _T > &  _Target)
{
  return _Target.fetch_sub(1) - 1;
}
template<typename _T >
_T Concurrency::details::atomic_exchange ( std::atomic< _T > &  _Target,
_T  _Value 
)
{
  return _Target.exchange(_Value);
}
template<typename _T >
_T Concurrency::details::atomic_increment ( std::atomic< _T > &  _Target)
{
  return _Target.fetch_add(1) + 1;
}
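Each atomic_* wrapper above returns the value after the operation (fetch_add(1) + 1, and so on), which is exactly what intrusive reference counting wants: a decrement that returns zero means the caller just released the last reference. A minimal sketch of that usage (the class is illustrative):

#include <atomic>

// Same shape as the helpers above: return the value *after* the operation.
template <typename T>
T atomic_increment(std::atomic<T>& target) { return target.fetch_add(1) + 1; }

template <typename T>
T atomic_decrement(std::atomic<T>& target) { return target.fetch_sub(1) - 1; }

class RefCounted
{
public:
    void add_ref() { atomic_increment(_refs); }

    void release()
    {
        if (atomic_decrement(_refs) == 0)   // our decrement released the last reference
            delete this;
    }

    virtual ~RefCounted() {}

private:
    std::atomic<long> _refs{1};             // constructed with one owning reference
};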
template<class _Ty >
_Ty&& Concurrency::details::declval ( )
template<typename _C , typename _Ty , typename _U >
bool Concurrency::details::operator!= ( const _Concurrent_queue_iterator< _C, _Ty > &  _I,
const _Concurrent_queue_iterator< _C, _U > &  _J 
)
  {
  return _I._My_item!=_J._My_item;
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator!= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return !(_I==_J);
  }
template<typename _Container , typename _Ty >
_Vector_iterator<_Container,_Ty> Concurrency::details::operator+ ( ptrdiff_t  _Offset,
const _Vector_iterator< _Container, _Ty > &  _Vec 
)
  {
  return _Vector_iterator<_Container,_Ty>( *_Vec._My_vector, _Vec._My_index+_Offset );
  }
template<typename _Container , typename _Ty , typename _U >
ptrdiff_t Concurrency::details::operator- ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return ptrdiff_t(_I._My_index)-ptrdiff_t(_J._My_index);
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator< ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return _I._My_index<_J._My_index && _I._My_vector == _J._My_vector;
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator<= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return !(_J<_I);
  }
template<typename _C , typename _Ty , typename _U >
bool Concurrency::details::operator== ( const _Concurrent_queue_iterator< _C, _Ty > &  _I,
const _Concurrent_queue_iterator< _C, _U > &  _J 
)
  {
  return _I._My_item==_J._My_item;
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator== ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return _I._My_index==_J._My_index && _I._My_vector == _J._My_vector;
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator> ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return _J<_I;
  }
template<typename _Container , typename _Ty , typename _U >
bool Concurrency::details::operator>= ( const _Vector_iterator< _Container, _Ty > &  _I,
const _Vector_iterator< _Container, _U > &  _J 
)
  {
  return !(_I<_J);
  }
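Together these operators give _Vector_iterator and _Concurrent_queue_iterator the comparison and difference semantics standard algorithms expect; note that the ordering comparisons are meaningful only for iterators into the same container, since they also compare _My_vector. An illustrative use over concurrent_vector:

#include <concurrent_vector.h>
#include <algorithm>

void find_square()
{
    concurrency::concurrent_vector<int> v;
    for (int i = 0; i < 10; ++i)
        v.push_back(i * i);

    auto it  = std::find(v.begin(), v.end(), 49);   // relies on operator== / operator!=
    auto pos = it - v.begin();                      // relies on the ptrdiff_t difference overload
    (void)pos;                                      // pos == 7 for this fill pattern
}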

Variable Documentation

_CONCRTIMP const unsigned char Concurrency::details::_Byte_reverse_table[]
const size_t Concurrency::details::ERROR_MSG_BUFFER_SIZE = 1024
const int Concurrency::details::LOOP_UNROLL_THRESHOLD = 4
static