STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
concrt.h
Go to the documentation of this file.
1 /***
2 * ==++==
3 *
4 * Copyright (c) Microsoft Corporation. All rights reserved.
5 *
6 * ==--==
7 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
8 *
9 * concrt.h
10 *
11 * Main public header file for ConcRT. This is the only header file a C++ program must include to use the core concurrency runtime features.
12 *
13 * The Agents And Message Blocks Library and the Parallel Patterns Library (PPL) are defined in separate header files.
14 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
15 ****/
16 
17 #pragma once
18 
19 #include <crtdefs.h>
20 
21 #if !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM))
22  #error ERROR: Concurrency Runtime is supported only on X64, X86 and ARM architectures.
23 #endif /* !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM)) */
24 
25 #if defined (_M_CEE)
26  #error ERROR: Concurrency Runtime is not supported when compiling /clr.
27 #endif /* defined (_M_CEE) */
28 
29 #ifndef __cplusplus
30  #error ERROR: Concurrency Runtime is supported only for C++.
31 #endif /* __cplusplus */
32 
33 #define _CONCRT_H
34 
35 #include <exception>
36 #include <sal.h>
37 #include <limits.h>
38 #include <crtdbg.h>
39 #include <guiddef.h>
40 #include <intrin.h>
41 #include <new>
42 
43 #pragma pack(push,_CRT_PACKING)
44 #pragma push_macro("new")
45 #undef new
46 
47 // Forward declare structs needed from Windows header files
48 
49 struct _SECURITY_ATTRIBUTES;
50 typedef _SECURITY_ATTRIBUTES* LPSECURITY_ATTRIBUTES;
51 
52 struct _GROUP_AFFINITY;
53 typedef _GROUP_AFFINITY* PGROUP_AFFINITY;
54 
55 // Define essential types needed from Windows header files
56 
57 typedef unsigned long DWORD;
58 #ifndef _HRESULT_DEFINED
59 #define _HRESULT_DEFINED
60 #ifdef __midl
61 typedef LONG HRESULT;
62 #else /* __midl */
63 typedef __success(return >= 0) long HRESULT;
64 #endif /* __midl */
65 #endif /* _HRESULT_DEFINED */
66 typedef void * HANDLE;
67 
68 // Undefine Yield that is possibly defined by windows.h, and _YieldProcessor
69 
70 #undef Yield
71 #undef _YieldProcessor
72 
73 #if (defined (_M_IX86) || defined (_M_X64))
74 #define _YieldProcessor _mm_pause
75 #else /* (defined (_M_IX86) || defined (_M_X64)) */
76 inline void _YieldProcessor() {}
77 #endif /* (defined (_M_IX86) || defined (_M_X64)) */
78 
79 // Make sure the exchange pointer intrinsics works on x86 architecture
80 
81 #if defined (_M_IX86) && !defined(FIXED_592562) // Leave enabled until onflict with inline function in 8.1 SDK winnt.h header is fixed
82 
83 #undef _InterlockedExchangePointer
84 #undef _InterlockedCompareExchangePointer
85 
86 #define _InterlockedExchangePointer(_Target, _Value) reinterpret_cast<void *>(static_cast<__w64 long>(_InterlockedExchange( \
87  static_cast<long volatile *>(reinterpret_cast<__w64 long volatile *>(static_cast<void * volatile *>(_Target))), \
88  static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Value))))))
89 
90 #define _InterlockedCompareExchangePointer(_Target, _Exchange, _Comparand) reinterpret_cast<void *>(static_cast<__w64 long>(_InterlockedCompareExchange( \
91  static_cast<long volatile *>(reinterpret_cast<__w64 long volatile *>(static_cast<void * volatile *>(_Target))), \
92  static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Exchange))), \
93  static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Comparand))))))
94 
95 #endif /* defined (_M_IX86) */
96 
97 #if (defined (_M_IX86) || defined (_M_ARM))
98 
99 #define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile *>(_Target)))
100 #define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile *>(_Target)))
101 #define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange( \
102  reinterpret_cast<long volatile *>(_Target), \
103  static_cast<long>(_Exchange), \
104  static_cast<long>(_Comparand)))
105 
106 typedef _W64 unsigned long DWORD_PTR, *PDWORD_PTR;
107 
108 #else /* (defined (_M_IX86) || defined (_M_ARM)) */
109 
110 #define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile *>(_Target)))
111 #define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile *>(_Target)))
112 #define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange64( \
113  reinterpret_cast<__int64 volatile *>(_Target), \
114  static_cast<__int64>(_Exchange), \
115  static_cast<__int64>(_Comparand)))
116 
117 typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR;
118 
119 #endif /* (defined (_M_IX86) || defined (_M_ARM)) */
120 
121 #if defined (_DEBUG)
122 #if _MSC_VER
123 // Turn off compiler warnings that are exacerbated by constructs in this
124 // file's definitions:
125 
126 // Warning C4127: conditional expression is constant. This is caused by
127 // the macros with "do { ... } while (false)" syntax. The syntax is
128 // a good way to ensure that a statement-like macro can be used in all
129 // contexts (specifically if statements), but the compiler warns about
130 // the "while (false)" part.
131 
132 #define _CONCRT_ASSERT(x) __pragma (warning (suppress: 4127)) do {_ASSERTE(x); __assume(x);} while(false)
133 #else
134 #define _CONCRT_ASSERT(x) do {_ASSERTE(x); __assume(x);} while(false)
135 #endif
136 #else /* defined (_DEBUG) */
137 #define _CONCRT_ASSERT(x) __assume(x)
138 #endif /* defined (_DEBUG) */
139 
140 // Used internally to represent the smallest unit in which to allocate hidden types
141 
142 
143 typedef void * _CONCRT_BUFFER;
144 #define _LISTENTRY_SIZE ((2 * sizeof(void *) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))
145 #define _SAFERWLIST_SIZE ((3 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))
146 
151 
152 namespace Concurrency
153 {
167 
168 _CRTIMP void __cdecl wait(unsigned int _Milliseconds);
169 
184 
185 _CRTIMP void * __cdecl Alloc(size_t _NumBytes);
186 
199 
200 _CRTIMP void __cdecl Free(_Pre_maybenull_ _Post_invalid_ void * _PAllocation);
201 
206 
207 
208 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
209 
231 
232 _CRTIMP void __cdecl set_task_execution_resources(DWORD_PTR _ProcessAffinityMask);
233 
255 
256 _CRTIMP void __cdecl set_task_execution_resources(unsigned short _Count, PGROUP_AFFINITY _PGroupAffinity);
257 
258 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
259 
264 
265 typedef void (__cdecl * TaskProc)(void *);
266 
267 //
268 // Forward declarations:
269 //
270 class Scheduler;
271 class ScheduleGroup;
272 class Context;
273 
274 namespace details
275 {
276  //
277  // Forward declarations:
278  //
279  class ContextBase;
280  class _TaskCollectionBase;
281 
282  //
283  // A utility to hide operator delete from certain objects while still allowing the runtime to delete them internally.
284  //
// Destroys and frees an object on behalf of the runtime. Routing the delete
// through this helper lets ConcRT delete objects internally even when the
// object's own operator delete is hidden from user code.
template<class _T>
void _InternalDeleteHelper(_T * _PObject)
{
    delete _PObject;
}
290 
291  // The purpose of the class is solely to direct allocations of ConcRT classes
292  // through a single point, using an internal allocator.
293  struct _AllocBase
294  {
295  // Standard operator new
296  void * operator new(size_t _Size)
297  {
298  return Concurrency::Alloc(_Size);
299  }
300 
301  // Standard operator delete
302  void operator delete(void * _Ptr) throw()
303  {
304  Concurrency::Free(_Ptr);
305  }
306 
307  // Standard operator new, no-throw version
308  void * operator new(size_t _Size, const std::nothrow_t&) throw()
309  {
310  void * _Ptr;
311 
312  try
313  {
314  _Ptr = Concurrency::Alloc(_Size);
315  }
316  catch(...)
317  {
318  _Ptr = NULL;
319  }
320 
321  return (_Ptr);
322  }
323 
324  // Standard operator delete, no-throw version
325  void operator delete(void * _Ptr, const std::nothrow_t&) throw()
326  {
327  operator delete(_Ptr);
328  }
329 
330  // Standard operator new array
331  void * operator new[](size_t _Size)
332  {
333  return operator new(_Size);
334  }
335 
336  // Standard operator delete array
337  void operator delete[](void * _Ptr) throw()
338  {
339  operator delete(_Ptr);
340  }
341 
342  // Standard operator new array, no-throw version
343  void * operator new[](size_t _Size, const std::nothrow_t& _No_throw) throw ()
344  {
345  return operator new(_Size, _No_throw);
346  }
347 
348  // Standard operator delete array, no-throw version
349  void operator delete[](void * _Ptr, const std::nothrow_t& _No_throw) throw()
350  {
351  operator delete(_Ptr, _No_throw);
352  }
353 
354  // Standard operator new with void* placement
355  void * operator new(size_t, void * _Location) throw()
356  {
357  return _Location;
358  }
359 
360  // Standard operator delete with void* placement
361  void operator delete(void *, void *) throw()
362  {
363  }
364 
365  // Standard operator new array with void* placement
366  void * __cdecl operator new[](size_t, void * _Location) throw()
367  {
368  return _Location;
369  }
370 
371  // Standard operator delete array with void* placement
372  void __cdecl operator delete[](void *, void *) throw()
373  {
374  }
375  };
376 
377  // Stubs to allow the header files to access runtime functionality for WINAPI_PARTITION apps.
378  class _Context
379  {
380  public:
381  _CRTIMP _Context(::Concurrency::Context * _PContext = NULL) : _M_pContext(_PContext) {}
382  _CRTIMP static _Context __cdecl _CurrentContext();
383  _CRTIMP static void __cdecl _Yield();
384  _CRTIMP static void __cdecl _Oversubscribe(bool _BeginOversubscription);
385  _CRTIMP bool _IsSynchronouslyBlocked() const;
386  private:
387  ::Concurrency::Context * _M_pContext;
388  };
389 
391  {
392  public:
393  _CRTIMP _Scheduler(::Concurrency::Scheduler * _PScheduler = NULL) : _M_pScheduler(_PScheduler) {}
394  _CRTIMP unsigned int _Reference();
395  _CRTIMP unsigned int _Release();
396  _CRTIMP Concurrency::Scheduler * _GetScheduler() { return _M_pScheduler; }
397 
398  private:
399  ::Concurrency::Scheduler * _M_pScheduler;
400  };
401 
403  {
404  public:
405  _CRTIMP static void __cdecl _ScheduleTask(TaskProc _Proc, void * _Data);
406  _CRTIMP static unsigned int __cdecl _Id();
407  _CRTIMP static unsigned int __cdecl _GetNumberOfVirtualProcessors();
408  _CRTIMP static _Scheduler __cdecl _Get();
409  };
410 
411  //
412  // Wrappers for atomic access
413  //
414  template <size_t _Size>
415  struct _Subatomic_impl { };
416 
417  template<>
418  struct _Subatomic_impl<4> {
419  template <typename _Ty>
420  static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs) {
421  // For the compiler, a volatile write has release semantics. In addition, on ARM,
422  // the volatile write will emit a data memory barrier before the write.
423  _Location = _Rhs;
424  }
425 
426  template <typename _Ty>
427  static _Ty _LoadWithAquire(volatile _Ty& _Location) {
428  // For the compiler, a volatile read has acquire semantics. In addition, on ARM,
429  // the volatile read will emit a data memory barrier after the read.
430  return _Location;
431  }
432 
433  template <typename _Ty>
434  static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand) {
435  return (_Ty)_InterlockedCompareExchange((volatile long*)&_Location, (long)_NewValue, (long)_Comperand);
436  }
437 
438  template <typename _Ty>
439  static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend) {
440  return (_Ty)_InterlockedExchangeAdd((volatile long*)&_Location, (long)_Addend);
441  }
442 
443  template <typename _Ty>
444  static _Ty _Increment(volatile _Ty& _Location) {
445  return (_Ty)_InterlockedIncrement((volatile long*)&_Location);
446  }
447 
448  template <typename _Ty>
449  static _Ty _Decrement(volatile _Ty& _Location) {
450  return (_Ty)_InterlockedDecrement((volatile long*)&_Location);
451  }
452  };
453 
#if defined (_M_X64)
// 8-byte specialization: maps onto the 64-bit interlocked intrinsics.
// Only available on x64, where 8-byte volatile accesses are atomic.
template<>
struct _Subatomic_impl<8> {
    template <typename _Ty>
    static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs) {
        // For the compiler, a volatile write has release semantics.
        _Location = _Rhs;
    }

    template <typename _Ty>
    static _Ty _LoadWithAquire(volatile _Ty& _Location) {
        // For the compiler, a volatile read has acquire semantics.
        return _Location;
    }

    // Atomic compare-and-swap; returns the previous value at _Location.
    template <typename _Ty>
    static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand) {
        return (_Ty)_InterlockedCompareExchange64((volatile __int64*)&_Location, (__int64)_NewValue, (__int64)_Comperand);
    }

    // Atomic add; returns the value prior to the addition.
    template <typename _Ty>
    static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend) {
        return (_Ty)_InterlockedExchangeAdd64((volatile __int64*)&_Location, (__int64)_Addend);
    }

    // Atomic increment; returns the incremented value.
    template <typename _Ty>
    static _Ty _Increment(volatile _Ty& _Location) {
        return (_Ty)_InterlockedIncrement64((volatile __int64*)&_Location);
    }

    // Atomic decrement; returns the decremented value.
    template <typename _Ty>
    static _Ty _Decrement(volatile _Ty& _Location) {
        return (_Ty)_InterlockedDecrement64((volatile __int64*)&_Location);
    }
};
#endif /* defined (_M_X64) */
490 
491 
492  //
493  // Wrapper for atomic access. Only works for 4-byte or 8-byte types (for example, int, long, long long, size_t, pointer).
494  // Anything else might fail to compile.
495  //
496  template <typename _Ty>
497  class _Subatomic {
498  private:
499  volatile _Ty _M_value;
500 
501  public:
502  operator _Ty() const volatile {
504  }
505 
506  _Ty operator=(_Ty _Rhs) {
508  return _Rhs;
509  }
510 
511  _Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand) {
512  return _Subatomic_impl<sizeof(_Ty)>::_CompareAndSwap(_M_value, _NewValue, _Comperand);
513  }
514 
515  _Ty _FetchAndAdd(_Ty _Addend) {
517  }
518 
519  _Ty operator++() {
521  }
522 
523  _Ty operator++(int) {
525  }
526 
527  _Ty operator--() {
529  }
530 
531  _Ty operator--(int) {
533  }
534 
535  _Ty operator+=(_Ty _Addend) {
536  return _FetchAndAdd(_Addend) + _Addend;
537  }
538  };
539 
540  //
541  // An internal exception that is used for cancellation. Users do not "see" this exception except through the
542  // resulting stack unwind. This exception should never be intercepted by user code. It is intended
543  // for use by the runtime only.
544  //
545  class _Interruption_exception : public std::exception
546  {
547  public:
548  explicit _CRTIMP _Interruption_exception(const char * _Message) throw();
550  };
551 
552  //
553  // An RAII class that spin-waits on a "rented" flag.
554  //
555  class _SpinLock
556  {
557  private:
558  volatile long& _M_flag;
559 
560  public:
561  _CRTIMP _SpinLock(volatile long& _Flag);
563 
564  private:
565  _SpinLock(const _SpinLock&);
566  void operator=(const _SpinLock&);
567  };
568 
569  //
570  // A class that holds the count used for spinning and is dependent
571  // on the number of hardware threads
572  //
573  struct _SpinCount
574  {
575  // Initializes the spinCount to either 0 or SPIN_COUNT, depending on
576  // the number of hardware threads.
577  static void __cdecl _Initialize();
578 
579  // Returns the current value of s_spinCount
580  _CRTIMP static unsigned int __cdecl _Value();
581 
582  // The number of iterations used for spinning
583  static unsigned int _S_spinCount;
584  };
585 
589 
590  void _CRTIMP __cdecl _UnderlyingYield();
591 
596 
597  unsigned int _CRTIMP __cdecl _GetConcurrency();
598 
602 
603  template<unsigned int _YieldCount = 1>
605  {
606  public:
607 
608  typedef void (__cdecl *_YieldFunction)();
609 
613 
614  _SpinWait(_YieldFunction _YieldMethod = _UnderlyingYield)
615  : _M_yieldFunction(_YieldMethod), _M_state(_StateInitial)
616  {
617  // Defer initialization of other fields to _SpinOnce().
618  }
619 
623 
624  void _SetSpinCount(unsigned int _Count)
625  {
626  _CONCRT_ASSERT(_M_state == _StateInitial);
627  if (_Count == 0)
628  {
629  // Specify a count of 0 if we are on a single proc.
630  _M_state = _StateSingle;
631  }
632  else
633  {
634  _M_currentSpin = _Count;
635  _M_currentYield = _YieldCount;
636  _M_state = _StateSpin;
637  }
638  }
639 
651 
652  bool _SpinOnce()
653  {
654  switch (_M_state)
655  {
656  case _StateSpin:
657  {
658  unsigned long _Count = _NumberOfSpins();
659 
660  for (unsigned long _I = 0; _I < _Count; _I++)
661  {
662  _YieldProcessor();
663  }
664 
665  if (!_ShouldSpinAgain())
666  {
667  _M_state = (_M_currentYield == 0) ? _StateBlock : _StateYield;
668  }
669 
670  return true;
671  }
672 
673  case _StateYield:
674  _CONCRT_ASSERT(_M_currentYield > 0);
675  if (--_M_currentYield == 0)
676  {
677  _M_state = _StateBlock;
678  }
679 
680  // Execute the yield
681  _DoYield();
682  return true;
683 
684  case _StateBlock:
685  // Reset to defaults if client does not block
686  _Reset();
687  return false;
688 
689  case _StateSingle:
690  // No need to spin on a single processor: just execute the yield
691  _DoYield();
692  return false;
693 
694  case _StateInitial:
695  // Reset counters to their default value and Spin once.
696  _Reset();
697  return _SpinOnce();
698  default:
699  // Unreached
700  return false;
701  };
702  }
703 
704  protected:
705 
709 
711  {
716  _StateSingle
717  };
718 
722 
723  void _DoYield()
724  {
725  bool _ShouldYield = (_YieldCount != 0);
726  if (_ShouldYield)
727  {
728  _CONCRT_ASSERT(_M_yieldFunction != NULL);
729  _M_yieldFunction();
730  }
731  else
732  {
733  _YieldProcessor();
734  }
735  }
736 
740 
741  void _Reset()
742  {
743  _M_state = _StateInitial;
744 
745  // Reset to the default spin value. The value specified
746  // by the client is ignored on a reset.
747  _SetSpinCount(_SpinCount::_Value());
748 
749  _CONCRT_ASSERT(_M_state != _StateInitial);
750  }
751 
758 
759  unsigned long _NumberOfSpins()
760  {
761  return 1;
762  }
763 
770 
772  {
773  return (--_M_currentSpin > 0);
774  }
775 
776  unsigned long _M_currentSpin;
777  unsigned long _M_currentYield;
779  _YieldFunction _M_yieldFunction;
780  };
781 
784 
785  //
786  // This reentrant lock uses CRITICAL_SECTION and is intended for use when kernel blocking
787  // is desirable and where it is either known that the lock will be taken recursively in
788  // the same thread, or not known that a non-reentrant lock can be used safely.
789  //
791  {
792  public:
793  // Constructor for _ReentrantBlockingLock
795 
796  // Destructor for _ReentrantBlockingLock
798 
799  // Acquire the lock, spin if necessary
800  _CRTIMP void _Acquire();
801 
802  // Tries to acquire the lock, does not spin.
803  // Returns true if the acquisition worked, false otherwise
804  _CRTIMP bool _TryAcquire();
805 
806  // Releases the lock
807  _CRTIMP void _Release();
808 
809 
810  // An exception safe RAII wrapper.
812  {
813  public:
814  // Constructs a holder and acquires the specified lock
815  explicit _Scoped_lock(_ReentrantBlockingLock& _Lock) : _M_lock(_Lock)
816  {
817  _M_lock._Acquire();
818  }
819 
820  // Destroys the holder and releases the lock
822  {
823  _M_lock._Release();
824  }
825  private:
826  _ReentrantBlockingLock& _M_lock;
827 
828  _Scoped_lock(const _Scoped_lock&); // no copy constructor
829  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
830  };
831 
832  private:
833  // Critical section requires windows.h. Hide the implementation so that
834  // user code need not include windows.
835  _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
836  };
837 
838  //
839  // This reentrant lock is a pure spin lock and is intended for use when kernel blocking
840 // is not desirable and where it is either known that the lock will be taken recursively in
841  // the same thread, or not known that a non-reentrant lock can be used safely.
842  //
844  {
845  public:
846  // Constructor for _ReentrantLock
848 
849  // Acquire the lock, spin if necessary
850  _CRTIMP void _Acquire();
851 
852  // Tries to acquire the lock, does not spin
853  // Returns true if the acquisition worked, false otherwise
854  _CRTIMP bool _TryAcquire();
855 
856  // Releases the lock
857  _CRTIMP void _Release();
858 
859  // An exception safe RAII wrapper.
861  {
862  public:
863  // Constructs a holder and acquires the specified lock
864  explicit _Scoped_lock(_ReentrantLock& _Lock) : _M_lock(_Lock)
865  {
866  _M_lock._Acquire();
867  }
868 
869  // Destroys the holder and releases the lock
871  {
872  _M_lock._Release();
873  }
874  private:
875  _ReentrantLock& _M_lock;
876 
877  _Scoped_lock(const _Scoped_lock&); // no copy constructor
878  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
879  };
880 
881  private:
883  volatile long _M_owner;
884  };
885 
886  //
887  // This non-reentrant lock uses CRITICAL_SECTION and is intended for use in situations
888  // where it is known that the lock will not be taken recursively, and can be more
889  // efficiently implemented.
890  //
892  {
893  public:
894  // Constructor for _NonReentrantBlockingLock
895  //
896  // The constructor is exported because _NonReentrantLock is
897  // included in DevUnitTests.
899 
900  // Constructor for _NonReentrantBlockingLock
902 
903  // Acquire the lock, spin if necessary
904  _CRTIMP void _Acquire();
905 
906  // Tries to acquire the lock, does not spin
907  // Returns true if the lock is taken, false otherwise
908  _CRTIMP bool _TryAcquire();
909 
910  // Releases the lock
911  _CRTIMP void _Release();
912 
913  // An exception safe RAII wrapper.
915  {
916  public:
917  // Constructs a holder and acquires the specified lock
918  explicit _Scoped_lock(_NonReentrantBlockingLock& _Lock) : _M_lock(_Lock)
919  {
920  _M_lock._Acquire();
921  }
922 
923  // Destroys the holder and releases the lock
925  {
926  _M_lock._Release();
927  }
928  private:
929  _NonReentrantBlockingLock& _M_lock;
930 
931  _Scoped_lock(const _Scoped_lock&); // no copy constructor
932  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
933  };
934 
935  private:
936  // Critical section requires windows.h. Hide the implementation so that
937  // user code need not include windows.h
938  _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
939  };
940 
941  //
942  // A Reader-Writer Lock is intended for use in situations with many readers and rare
943  // writers.
944  //
945  // A writer request immediately blocks future readers and then waits until all current
946  // readers drain. A reader request does not block future writers and must wait until
947  // all writers are done, even those that cut in front of it. In any race between a
948  // reader and a writer, the writer always wins.
949  //
951  {
952  public:
953  // Constructor for _ReaderWriterLock
954  //
955  // The constructor and destructor are exported because _ReaderWriterLock is
956  // included in DevUnitTests.
958 
959  // Acquire lock for reading. Spins until all writers finish, new writers
960  // can cut in front of a waiting reader.
961  _CRTIMP void _AcquireRead();
962 
963  // Release lock for reading. The last reader changes m_state to State.kFree
964  _CRTIMP void _ReleaseRead();
965 
966  // Acquire lock for writing. Spin until no readers exist, then acquire lock
967  // and prevent new readers.
968  _CRTIMP void _AcquireWrite();
969 
970  // Release lock for writing.
971  _CRTIMP void _ReleaseWrite();
972 
973  // Try to acquire the write lock, do not spin if unable to acquire.
974  // Returns true if the acquisition worked, false otherwise
975  _CRTIMP bool _TryAcquireWrite();
976 
977  // Returns true if it is in write state, false otherwise
978  bool _HasWriteLock() const
979  {
980  return (_M_state == _Write);
981  }
982 
983  // Guarantees that all writers are out of the lock. This does nothing if there are no pending writers.
984  void _FlushWriteOwners();
985 
986  // An exception safe RAII wrapper.
988  {
989  public:
990  // Constructs a holder and acquires the writer lock
991  explicit _Scoped_lock(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
992  {
994  }
995 
996  // Destroys the holder and releases the writer lock
998  {
1000  }
1001 
1002  private:
1003 
1004  _ReaderWriterLock& _M_lock;
1005 
1006  _Scoped_lock(const _Scoped_lock&); // no copy constructor
1007  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
1008  };
1009 
1010  // An exception safe RAII wrapper for reads.
1012  {
1013  public:
1014  // Constructs a holder and acquires the reader lock
1015  explicit _Scoped_lock_read(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
1016  {
1018  }
1019 
1020  // Destroys the holder and releases the reader lock
1022  {
1024  }
1025 
1026  private:
1027 
1028  _ReaderWriterLock& _M_lock;
1029 
1030  _Scoped_lock_read(const _Scoped_lock_read&); // no copy constructor
1031  _Scoped_lock_read const & operator=(const _Scoped_lock_read&); // no assignment operator
1032  };
1033 
1034  private:
1035  // State enum where:
1036  // -1 --> write mode
1037  // 0 --> free
1038  // n > 0 --> n readers have locked in read mode.
1039  enum _State
1040  {
1041  _Write = -1,
1042  _Free = 0,
1043  _Read = 1
1044  };
1045 
1046  // The current state of the lock, mapping to the State enum. This is also
1047  // an indicator of the number of readers holding the lock, for any number > 0.
1048  volatile long _M_state;
1049 
1050  // A writer increments this as soon as it wants to lock and decrements this
1051  // after releasing the lock. To prevent writers from starving, a reader will
1052  // wait until this counter is zero, and only then will try to obtain the lock.
1053  volatile long _M_numberOfWriters;
1054 
1055  // Spin-Wait-Until variant
1056  static void __cdecl _WaitEquals(volatile const long& _Location, long _Value, long _Mask = 0xFFFFFFFF);
1057  };
1058 
1059  //
1060  // Exception safe RAII wrappers for _malloca()
1061  //
1062 
1063  //
1064  // _MallocaArrayHolder is used when the allocation size is known up front, and the memory must be allocated in a contiguous space
1065  //
// Exception-safe RAII holder for a contiguous array allocated with _malloca:
// destroys the elements registered so far and frees the buffer with _freea.
// Used when the allocation size is known up front.
// NOTE(review): the class head, default constructor, one assertion, the
// _IncrementConstructedElemsCount declaration/body, the destructor head, the
// _M_ElemsConstructed member and the copy/assign declarations were dropped by
// the extraction (orig. lines 1067, 1071, 1077, 1095, 1098, 1101, 1112,
// 1115-1116); restored -- confirm against the ConcRT sources.
template<typename _ElemType>
class _MallocaArrayHolder
{
public:

    _MallocaArrayHolder() : _M_ElemArray(NULL), _M_ElemsConstructed(0) {}

    // _Initialize takes the pointer to the memory allocated by the user using _malloca
    void _Initialize(_ElemType * _Elem)
    {
        // The object must be initialized exactly once
        _CONCRT_ASSERT(_M_ElemArray == NULL && _M_ElemsConstructed == 0);
        _M_ElemArray = _Elem;
        _M_ElemsConstructed = 0;
    }

    // _InitOnRawMalloca take the raw pointer returned by _malloca directly
    // It will initialize itself with that pointer and return a strong typed pointer.
    // To be noted that the constructor will NOT be called.
    _ElemType * _InitOnRawMalloca(void * _MallocaRet)
    {
        if (_MallocaRet == nullptr)
            throw std::bad_alloc();
        _Initialize(static_cast<_ElemType *>(_MallocaRet));
        return static_cast<_ElemType *>(_MallocaRet);
    }

    // Register the next slot for destruction. Because we only keep the index of the last slot to be destructed,
    // this method must be called sequentially from 0 to N where N < _ElemCount.
    void _IncrementConstructedElemsCount()
    {
        _CONCRT_ASSERT(_M_ElemArray != NULL); // must already be initialized
        _M_ElemsConstructed++;
    }

    // Destroys the registered elements and frees the _malloca buffer.
    virtual ~_MallocaArrayHolder()
    {
        for( size_t _I=0; _I < _M_ElemsConstructed; ++_I )
        {
            _M_ElemArray[_I]._ElemType::~_ElemType();
        }
        // Works even when object was not initialized, that is, _M_ElemArray == NULL
        _freea(_M_ElemArray);
    }
private:
    _ElemType * _M_ElemArray;       // the _malloca-allocated array
    size_t _M_ElemsConstructed;     // number of elements registered for destruction

    // Copy construction and assignment are not supported.
    _MallocaArrayHolder(const _MallocaArrayHolder & );
    _MallocaArrayHolder & operator=(const _MallocaArrayHolder & );
};
1118 
1119  //
1120  // _MallocaListHolder is used when the allocation size is not known up front, and the elements are added to the list dynamically
1121  //
// Exception-safe RAII holder for a singly-linked list of _malloca-allocated
// nodes; used when the allocation size is not known up front and elements are
// added dynamically. The destructor destroys each payload and frees each node.
// NOTE(review): the class head, constructor head, destructor head, the
// _ElemNodeType class head, its _M_Next member and assignment declaration,
// the _M_FirstNode member and the copy/assign declarations were dropped by
// the extraction (orig. lines 1123, 1132, 1156, 1169-1170, 1173, 1179, 1182,
// 1185-1186); restored -- confirm against the ConcRT sources.
template<typename _ElemType>
class _MallocaListHolder
{
public:
    // Returns the size required to allocate the payload itself and the pointer to the next element
    size_t _GetAllocationSize() const
    {
        return sizeof(_ElemNodeType);
    }

    _MallocaListHolder() : _M_FirstNode(NULL)
    {
    }

    // Add the next element to the list. The memory is allocated in the caller's frame by _malloca
    void _AddNode(_ElemType * _Elem)
    {
        _ElemNodeType * _Node = reinterpret_cast<_ElemNodeType *>(_Elem);
        _Node->_M_Next = _M_FirstNode;
        _M_FirstNode = reinterpret_cast<_ElemNodeType *>(_Elem);
    }

    // _AddRawMallocaNode take the raw pointer returned by _malloca directly
    // It will add that bucket of memory to the list and return a strong typed pointer.
    // To be noted that the constructor will NOT be called.
    _ElemType * _AddRawMallocaNode(void * _MallocaRet)
    {
        if (_MallocaRet == nullptr)
            throw std::bad_alloc();
        _AddNode(static_cast<_ElemType *>(_MallocaRet));
        return static_cast<_ElemType *>(_MallocaRet);
    }

    // Walk the list and destruct, then free each element
    virtual ~_MallocaListHolder()
    {
        for( _ElemNodeType * _Node = _M_FirstNode; _Node != NULL; )
        {
            auto _M_Next = _Node->_M_Next;
            _Node->_M_Elem._ElemType::~_ElemType();
            _freea(_Node);
            _Node = _M_Next;
        }
    }

private:

    // Node layout: the payload followed by the link to the next node.
    class _ElemNodeType
    {
        friend class _MallocaListHolder;
        _ElemType _M_Elem;
        _ElemNodeType * _M_Next;
        // Always instantiated using malloc, so default constructor and destructor are not needed.
        _ElemNodeType();
        ~_ElemNodeType();
        // Copy construction and assignment are not supported.
        _ElemNodeType(const _ElemNodeType & );
        _ElemNodeType & operator=(const _ElemNodeType & );
    };

    _ElemNodeType * _M_FirstNode;   // head of the node list

    // Copy construction and assignment are not supported.
    _MallocaListHolder(const _MallocaListHolder & );
    _MallocaListHolder & operator=(const _MallocaListHolder & );
};
1188 
1189  // Forward declarations
1191  class _TaskCollection;
1192  class _UnrealizedChore;
1193 } // namespace details
1194 
1195 //**************************************************************************
1196 // Public Namespace:
1197 //
1198 // Anything in the Concurrency namespace is intended for direct client consumption.
1199 //
1200 //**************************************************************************
1201 
1210 
1211 class scheduler_resource_allocation_error : public std::exception
1212 {
1213 public:
1223 
1224  _CRTIMP scheduler_resource_allocation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw();
1225 
1232 
1233  explicit _CRTIMP scheduler_resource_allocation_error(HRESULT _Hresult) throw();
1234 
1241 
1242  _CRTIMP HRESULT get_error_code() const throw();
1243 
1244 private:
1245  HRESULT _Hresult;
1246 };
1247 
1257 
1259 {
1260 public:
1270 
1271  _CRTIMP scheduler_worker_creation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw();
1272 
1279 
1280  explicit _CRTIMP scheduler_worker_creation_error(HRESULT _Hresult) throw();
1281 };
1282 
1286 
1287 class unsupported_os : public std::exception
1288 {
1289 public:
1296 
1297  explicit _CRTIMP unsupported_os(_In_z_ const char * _Message) throw();
1298 
1302 
1303  _CRTIMP unsupported_os() throw();
1304 };
1305 
1312 
1313 class scheduler_not_attached : public std::exception
1314 {
1315 public:
1322 
1323  explicit _CRTIMP scheduler_not_attached(_In_z_ const char * _Message) throw();
1324 
1328 
1329  _CRTIMP scheduler_not_attached() throw();
1330 };
1331 
1338 
1339 class improper_scheduler_attach : public std::exception
1340 {
1341 public:
1348 
1349  explicit _CRTIMP improper_scheduler_attach(_In_z_ const char * _Message) throw();
1350 
1354 
1356 };
1357 
1365 
1366 class improper_scheduler_detach : public std::exception
1367 {
1368 public:
1369 
1376 
1377  explicit _CRTIMP improper_scheduler_detach(_In_z_ const char * _Message) throw();
1378 
1382 
1384 };
1385 
1392 
// Exception thrown on an invalid call to Scheduler::Reference (for example,
// referencing a scheduler that is shutting down from an unrelated context).
1393 class improper_scheduler_reference : public std::exception
1394 {
1395 public:
1396
1403
// Constructs the exception with a descriptive message.
1404  explicit _CRTIMP improper_scheduler_reference(_In_z_ const char* _Message) throw();
1405
1409
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1411 };
1412 
1418 
// Exception thrown when a default-scheduler policy is set while a default
// scheduler already exists in the process.
1419 class default_scheduler_exists : public std::exception
1420 {
1421 public:
1428
// Constructs the exception with a descriptive message.
1429  explicit _CRTIMP default_scheduler_exists(_In_z_ const char * _Message) throw();
1430
1434
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1436 };
1437 
1451 
// Exception thrown when calls to Context::Block and Context::Unblock on a
// context are not properly paired.
1452 class context_unblock_unbalanced : public std::exception
1453 {
1454 public:
1461
// Constructs the exception with a descriptive message.
1462  explicit _CRTIMP context_unblock_unbalanced(_In_z_ const char * _Message) throw();
1463
1467
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1469 };
1470 
1477 
// Exception thrown when a context calls Context::Unblock on itself.
1478 class context_self_unblock : public std::exception
1479 {
1480 public:
1487
// Constructs the exception with a descriptive message.
1488  explicit _CRTIMP context_self_unblock(_In_z_ const char * _Message) throw();
1489
1493
// Constructs the exception with no message.
1494  _CRTIMP context_self_unblock() throw();
1495 };
1496 
1513 
// Exception thrown when a task collection is destroyed while it still has
// scheduled work that was never waited on.
1514 class missing_wait : public std::exception
1515 {
1516 public:
1523
// Constructs the exception with a descriptive message.
1524  explicit _CRTIMP missing_wait(_In_z_ const char * _Message) throw();
1525
1529
// Constructs the exception with no message.
1530  _CRTIMP missing_wait() throw();
1531 };
1532 
1542 
// Exception thrown when a messaging block is given a pointer to an invalid
// target block.
1543 class bad_target : public std::exception
1544 {
1545 public:
1552
// Constructs the exception with a descriptive message.
1553  explicit _CRTIMP bad_target(_In_z_ const char * _Message) throw();
1554
1558
// Constructs the exception with no message.
1559  _CRTIMP bad_target() throw();
1560 };
1561 
1566 
// Exception thrown when a messaging block cannot find a requested message.
1567 class message_not_found : public std::exception
1568 {
1569 public:
1576
// Constructs the exception with a descriptive message.
1577  explicit _CRTIMP message_not_found(_In_z_ const char * _Message) throw();
1578
1582
// Constructs the exception with no message.
1583  _CRTIMP message_not_found() throw();
1584 };
1585 
1592 
// Exception thrown when an attempt is made to link a messaging block to an
// invalid target (for example, linking the same target twice).
1593 class invalid_link_target : public std::exception
1594 {
1595 public:
1602
// Constructs the exception with a descriptive message.
1603  explicit _CRTIMP invalid_link_target(_In_z_ const char * _Message) throw();
1604
1608
// Constructs the exception with no message.
1609  _CRTIMP invalid_link_target() throw();
1610 };
1611 
1621 
// Exception thrown when an invalid or unknown key is passed to a
// SchedulerPolicy (see the PolicyElementKey enumeration for valid keys).
1622 class invalid_scheduler_policy_key : public std::exception
1623 {
1624 public:
1631
// Constructs the exception with a descriptive message.
1632  explicit _CRTIMP invalid_scheduler_policy_key(_In_z_ const char * _Message) throw();
1633
1637
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1639 };
1640 
1649 
// Exception thrown when a SchedulerPolicy key is paired with a value that is
// not valid for that key (see SchedulerPolicy::SetPolicyValue).
1650 class invalid_scheduler_policy_value : public std::exception
1651 {
1652 public:
1659
// Constructs the exception with a descriptive message.
1660  explicit _CRTIMP invalid_scheduler_policy_value(_In_z_ const char * _Message) throw();
1661
1665
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1667 };
1668 
1677 
1679 {
1680 public:
1687 
1688  explicit _CRTIMP invalid_scheduler_policy_thread_specification(_In_z_ const char * _Message) throw();
1689 
1693 
1695 };
1696 
1704 
// General-purpose exception thrown for Concurrency Runtime operations that
// are invalid but not covered by a more specific exception type.
1705 class invalid_operation : public std::exception
1706 {
1707 public:
1714
// Constructs the exception with a descriptive message.
1715  explicit _CRTIMP invalid_operation(_In_z_ const char * _Message) throw();
1716
1720
// Constructs the exception with no message.
1721  _CRTIMP invalid_operation() throw();
1722 };
1723 
1738 
// Exception thrown when a context exits while a nested scheduler it attached
// was never detached via CurrentScheduler::Detach.
1739 class nested_scheduler_missing_detach : public std::exception
1740 {
1741 public:
1748
// Constructs the exception with a descriptive message.
1749  explicit _CRTIMP nested_scheduler_missing_detach(_In_z_ const char * _Message) throw();
1750
1754
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1756 };
1757 
1761 
// Exception thrown when a Concurrency Runtime operation times out.
1762 class operation_timed_out : public std::exception
1763 {
1764 public:
1771
// Constructs the exception with a descriptive message.
1772  explicit _CRTIMP operation_timed_out(_In_z_ const char * _Message) throw();
1773
1777
// Constructs the exception with no message.
1778  _CRTIMP operation_timed_out() throw();
1779 };
1780 
1795 
// Exception thrown when a chore is scheduled on a task collection while it is
// still pending from a previous scheduling (rescheduled before completion).
1796 class invalid_multiple_scheduling : public std::exception
1797 {
1798 public:
1805
// Constructs the exception with a descriptive message.
1806  explicit _CRTIMP invalid_multiple_scheduling(_In_z_ const char * _Message) throw();
1807
1811
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1813 };
1814 
1821 
// Exception thrown when Context::Oversubscribe(false) is called without a
// matching prior Context::Oversubscribe(true).
1822 class invalid_oversubscribe_operation : public std::exception
1823 {
1824 public:
1831
// Constructs the exception with a descriptive message.
1832  explicit _CRTIMP invalid_oversubscribe_operation(_In_z_ const char * _Message) throw();
1833
1837
// NOTE(review): extraction gap — the default-constructor declaration from the
// original header is not visible in this fragment.
1839 };
1840 
1850 
// Exception thrown when a lock (such as critical_section) is acquired
// improperly — for example, recursively by a non-reentrant lock's owner.
1851 class improper_lock : public std::exception
1852 {
1853 public:
1854
1861
// Constructs the exception with a descriptive message.
1862  explicit _CRTIMP improper_lock(_In_z_ const char * _Message) throw();
1863
1867
// Constructs the exception with no message.
1868  _CRTIMP improper_lock() throw();
1869 };
1870 
1878 
// Exception used to signal cancellation of a running task; thrown to unwind a
// task that has been canceled.
1879 class task_canceled : public std::exception
1880 {
1881 public:
1888
// Constructs the exception with a descriptive message.
1889  explicit _CRTIMP task_canceled(_In_z_ const char * _Message) throw();
1890
1894
// Constructs the exception with no message.
1895  _CRTIMP task_canceled() throw();
1896 };
1897 
1901 
1903 {
1904 public:
1905 
1912 
1914  _M_type(_System),
1915  _M_reserved(0),
1916  _M_pBinding(NULL),
1917  _M_ptr(NULL)
1918  {
1919  }
1920 
1924 
1926  {
1927  _Assign(_Src);
1928  }
1929 
1930 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
1931 
1941 
1942  _CRTIMP static location __cdecl from_numa_node(unsigned short _NumaNodeNumber);
1943 
1944 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
1945 
1952 
1953  _CRTIMP static location __cdecl current();
1954 
1961 
1963  {
1964  _Assign(_Rhs);
1965  return *this;
1966  }
1967 
1971 
1973  {
1974  }
1975 
1982 
1983  bool operator==(const location& _Rhs) const
1984  {
1985  return (_M_type == _Rhs._M_type && _M_ptr == _Rhs._M_ptr);
1986  }
1987 
1994 
1995  bool operator!=(const location& _Rhs) const
1996  {
1997  return !operator==(_Rhs);
1998  }
1999 
2000  //**************************************************
2001  //
2002  // Runtime internal public pieces of location. No code outside the core of ConcRT can depend on anything
2003  // below. It is internal implementation detail:
2004  //
2005 
2009 
2010  _CRTIMP static location __cdecl _Current_node();
2011 
2015 
2016  enum _Type
2017  {
2021  _System, // _M_id is meaningless
2022 
2026  _NumaNode, // _M_id is the Windows NUMA node number
2027 
2031  _SchedulingNode, // _M_id is the unique identifier for the scheduling node
2032 
2036  _ExecutionResource, // _M_id is the unique identifier for the execution resource
2037  };
2038 
2042 
2043  location(_Type _LocationType, unsigned int _Id, unsigned int _BindingId = 0, _Inout_opt_ void *_PBinding = NULL);
2044 
2055 
2056  bool _FastVPIntersects(const location& _Rhs) const;
2057 
2068 
2069  bool _FastNodeIntersects(const location& _Rhs) const;
2070 
2074 
2075  void _Assign(const location& _Rhs)
2076  {
2077  _M_type = _Rhs._M_type;
2078  _M_reserved = _Rhs._M_reserved;
2079 
2080  _M_ptr = _Rhs._M_ptr;
2081 
2082  _M_bindingId = _Rhs._M_bindingId;
2083  _M_pBinding = _Rhs._M_pBinding;
2084  }
2085 
2089 
2090  bool _Is_system() const
2091  {
2092  return (_Type)_M_type == _System;
2093  }
2094 
2098 
2099  template<typename T>
2100  T* _As() const
2101  {
2102  return reinterpret_cast<T *>(_M_pBinding);
2103  }
2104 
2108 
2109  unsigned int _GetId() const
2110  {
2111  return _M_id;
2112  }
2113 
2117 
2118  _Type _GetType() const
2119  {
2120  return (_Type)_M_type;
2121  }
2122 
2126 
2127  unsigned int _GetBindingId() const
2128  {
2129  return _M_bindingId;
2130  }
2131 
2132 private:
2133 
2134  // Indicates the type of location (as _Type)
2135  unsigned int _M_type : 28;
2136 
2137  // Flags on the location. Reserved for future use.
2138  unsigned int _M_reserved : 4;
2139 
2140  // If the location has a tight binding, this is the unique identifier of the scheduler to which the binding has specific meaning.
2141  unsigned int _M_bindingId;
2142 
2143  // Defines the agnostic (abstract hardware) binding of the location.
2144  union
2145  {
2146  // The identifier for the binding (NUMA node number, scheduler node ID, execution resource ID)
2147  unsigned int _M_id;
2148 
2149  // Pointer binding.
2150  void *_M_ptr;
2151  };
2152 
2153  // The specific binding to a scheduler. (For example, a specific virtual processor for something like location::current() )
2154  // This will be NULL if there is no tight binding.
2156 };
2157 
2158 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
2159 
2168 
// Abstract interface to a scheduler's schedule group: a set of related work
// that the scheduler keeps together. Instances are reference counted; clients
// release them with Release() rather than delete (operator delete is
// privatized below).
2169 class ScheduleGroup
2170 {
2171 public:
2172
2187
// Schedules a task (function pointer + opaque data) into this group.
2188  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0;
2189
2196
// Returns the runtime-assigned unique identifier of this schedule group.
2197  virtual unsigned int Id() const =0;
2198
2215
// Increments the reference count; returns the new count.
2216  virtual unsigned int Reference() =0;
2217
2236
// Decrements the reference count; returns the new count. The group is
// destroyed by the runtime when the count reaches zero.
2237  virtual unsigned int Release() =0;
2238
2239 protected:
2240
2241  //
2242  // Privatize operator delete. Clients should utilize Release to relinquish a schedule group.
2243  //
2244  template<class _T> friend void Concurrency::details::_InternalDeleteHelper(_T * _PObject);
2245
// Destructor is protected: lifetime is managed via Reference/Release.
2246  virtual ~ScheduleGroup() {};
2247 };
2248 
2254 
2255 const unsigned int MaxExecutionResources = 0xFFFFFFFF;
2256 
2262 
2263 const unsigned int INHERIT_THREAD_PRIORITY = 0x0000F000;
2264 
2273 
// Keys identifying the individual policy elements that can be stored in a
// SchedulerPolicy. Each key is paired with an unsigned int value via
// SchedulerPolicy::SetPolicyValue / GetPolicyValue.
2274 enum PolicyElementKey
2275 {
2282
// The kind of scheduler to create (a SchedulerType value).
2283  SchedulerKind,
2284
2294
// Upper bound on concurrency (MaxExecutionResources means "use all").
2295  MaxConcurrency,
2296
2307
// Lower bound on concurrency.
2308  MinConcurrency,
2309
2316
// How many contexts to create per virtual processor.
2317  TargetOversubscriptionFactor,
2318
2327
// Size of the per-virtual-processor cache of reusable contexts.
2328  LocalContextCacheSize,
2329
2335
// Stack size, used when creating contexts.
2336  ContextStackSize,
2337
2345
// Thread priority for scheduler contexts (may be INHERIT_THREAD_PRIORITY).
2346  ContextPriority,
2347
2354
// Scheduling algorithm selection (a SchedulingProtocolType value).
2355  SchedulingProtocol,
2356
2365
// Whether dynamic progress feedback is used (DynamicProgressFeedbackType).
2366  DynamicProgressFeedback,
2367
2375
// How scheduler threads initialize the Windows Runtime
// (a WinRTInitializationType value).
2376  WinRTInitialization,
2377
2381
// Sentinel: count of policy element keys; not itself a valid key.
2382  MaxPolicyElementKey
2383 };
2384 
2391 
// Value type for the SchedulerKind policy: which kind of threads the
// scheduler uses for underlying execution contexts.
2392 enum SchedulerType
2393 {
2397
// A scheduler backed by ordinary Win32 threads.
2398  ThreadScheduler,
2399
2405
// Deprecated alias: UMS scheduling is no longer supported, so this maps to
// ThreadScheduler (see the #pragma deprecated following this enum).
2406  UmsThreadDefault = ThreadScheduler
2407 };
2408 
2409 #pragma deprecated(UmsThreadDefault)
2410 
2417 
// Value type for the SchedulingProtocol policy: which scheduling algorithm
// the scheduler employs.
2418 enum SchedulingProtocolType
2419 {
2425
// Prefer keeping work from the same schedule group together (locality).
2426  EnhanceScheduleGroupLocality,
2427
2432
// Prefer round-robining between groups for fairness/forward progress.
2433  EnhanceForwardProgress
2434 };
2435 
2443 
// Value type for the DynamicProgressFeedback policy: whether the scheduler
// rebalances resources based on progress information.
2444 enum DynamicProgressFeedbackType
2445 {
2452
// Do not collect/use progress feedback for rebalancing.
2453  ProgressFeedbackDisabled,
2454
2461
// Collect progress feedback and rebalance resources dynamically.
2462  ProgressFeedbackEnabled
2463 };
2464 
2471 
// Value type for the WinRTInitialization policy: how scheduler threads
// initialize the Windows Runtime apartment.
2472 enum WinRTInitializationType
2473 {
2478
// Initialize scheduler threads into the multithreaded apartment (MTA).
2479  InitializeWinRTAsMTA,
2480
2485
// Do not perform Windows Runtime initialization on scheduler threads.
2486  DoNotInitializeWinRT
2487 };
2488 
2501 
// A bag of (PolicyElementKey, value) pairs that controls how a Scheduler is
// created and behaves. Copyable and assignable; invalid keys/values raise the
// invalid_scheduler_policy_* exceptions declared earlier in this header.
2502 class SchedulerPolicy
2503 {
2504 public:
2505
2524
// Constructs a policy populated entirely with default values.
2525  _CRTIMP SchedulerPolicy();
2526
2548
// Constructs a policy from _PolicyKeyCount (key, value) varargs pairs.
2549  _CRTIMP SchedulerPolicy(size_t _PolicyKeyCount, ...);
2550
2572
// Copy constructor — deep-copies the policy bag.
2573  _CRTIMP SchedulerPolicy(const SchedulerPolicy& _SrcPolicy);
2574
2591
// Copy assignment — replaces this policy's values with _RhsPolicy's.
2592  _CRTIMP SchedulerPolicy& operator=(const SchedulerPolicy& _RhsPolicy);
2593
2597
// Destroys the policy and its heap-allocated policy bag.
2598  _CRTIMP ~SchedulerPolicy();
2599
2615
// Returns the value currently stored for _Key.
2616  _CRTIMP unsigned int GetPolicyValue(PolicyElementKey _Key) const;
2617
2641
// Stores _Value for _Key and returns the previous value.
2642  _CRTIMP unsigned int SetPolicyValue(PolicyElementKey _Key, unsigned int _Value);
2643
2662
// Sets MinConcurrency and MaxConcurrency together so they can be validated
// as a consistent pair.
2663  _CRTIMP void SetConcurrencyLimits(unsigned int _MinConcurrency, unsigned int _MaxConcurrency = MaxExecutionResources);
2664
2673
// Runtime-internal: validates this policy for ConcRT use.
2674  void _ValidateConcRTPolicy() const;
2675
2676 private:
2677
// Storage for all policy values: one slot per PolicyElementKey, accessible
// either as a raw array or via the named-field overlay in the union.
2678  struct _PolicyBag
2679  {
2680  union
2681  {
2682  unsigned int _M_pPolicyBag[MaxPolicyElementKey];
2683  struct
2684  {
2685  SchedulerType _M_schedulerKind;
2686  unsigned int _M_maxConcurrency;
2687  unsigned int _M_minConcurrency;
2688  unsigned int _M_targetOversubscriptionFactor;
2689  unsigned int _M_localContextCacheSize;
2690  unsigned int _M_contextStackSize;
2691  unsigned int _M_contextPriority;
2692  SchedulingProtocolType _M_schedulingProtocol;
2693  DynamicProgressFeedbackType _M_dynamicProgressFeedback;
2694  WinRTInitializationType _M_WinRTInitialization;
2695  } _M_specificValues;
2696  } _M_values;
2697  } *_M_pPolicyBag;
2698
2702
// Initializes the policy bag from the varargs constructor's argument list.
2703  void _Initialize(size_t _PolicyKeyCount, va_list * _PArgs);
2704
2708
// Copies all values from _SrcPolicy (shared by copy ctor and operator=).
2709  void _Assign(const SchedulerPolicy& _SrcPolicy);
2710
2714
// True if _Key names a recognized policy element.
2715  static bool __cdecl _ValidPolicyKey(PolicyElementKey _Key);
2716
2720
// True if _Value is acceptable for _Key.
2721  static bool __cdecl _ValidPolicyValue(PolicyElementKey _Key, unsigned int _Value);
2722
2726
// Validates min/max concurrency as a pair (static and member forms).
2727  static bool __cdecl _AreConcurrencyLimitsValid(unsigned int _MinConcurrency, unsigned int _MaxConcurrency);
2728  bool _AreConcurrencyLimitsValid() const;
2729
2733
// Validates cross-key combinations of the stored values.
2734  bool _ArePolicyCombinationsValid() const;
2735
2739
// Resolves defaulted/derived values after keys are set.
2740  void _ResolvePolicyValues();
2741
2745
// Returns a printable name for the policy key at _Index (for diagnostics).
2746  static char * __cdecl _StringFromPolicyKey(unsigned int _Index);
2747 };
2748 
2760 
// Static facade over the scheduler attached to the calling context. All
// members are static; the private default constructor prevents instantiation.
2761 class CurrentScheduler
2762 {
2763 private:
// Not instantiable — this class is a namespace-like collection of statics.
2764  CurrentScheduler() {}
2765
2766 public:
2776
// Returns the unique identifier of the current scheduler.
2777  _CRTIMP static unsigned int __cdecl Id();
2778
2790
// Returns a copy of the policy the current scheduler was created with.
2791  _CRTIMP static SchedulerPolicy __cdecl GetPolicy();
2792
2804
// Returns a pointer to the scheduler attached to the calling context.
2805  _CRTIMP static Scheduler * __cdecl Get();
2806
2819
// Returns the number of virtual processors of the current scheduler.
2820  _CRTIMP static unsigned int __cdecl GetNumberOfVirtualProcessors();
2821
2844
// Creates a new scheduler with _Policy and attaches it to the calling
// context, making it current.
2845  _CRTIMP static void __cdecl Create(const SchedulerPolicy& _Policy);
2846
2863
// Detaches the current scheduler from the calling context, restoring the
// previously attached scheduler (if any).
2864  _CRTIMP static void __cdecl Detach();
2865
2879
// Registers a Windows event handle to be signaled when the current
// scheduler shuts down.
2880  _CRTIMP static void __cdecl RegisterShutdownEvent(HANDLE _ShutdownEvent);
2881
2902
// Creates a new schedule group within the current scheduler.
2903  _CRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup();
2904
2928
// Creates a new schedule group whose tasks are biased toward _Placement.
2929  _CRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup(location& _Placement);
2930
2949
// Schedules a light-weight task (function pointer + data) on the current
// scheduler.
2950  _CRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data);
2951
2973
// Schedules a light-weight task biased toward the given placement.
2974  _CRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement);
2975
2992
// True if _Placement is a location the current scheduler can execute on.
2993  _CRTIMP static bool __cdecl IsAvailableLocation(const location& _Placement);
2994 };
2995 
3010 
// Abstract interface to a ConcRT scheduler instance. Instances are created
// via the static Create factory, are reference counted (Reference/Release),
// and are destroyed by the runtime — hence the protected destructor.
3011 class Scheduler
3012 {
3013 protected:
3027
// Protected: schedulers are created only through Scheduler::Create.
3028  Scheduler() {}
3029
3033
// Protected: lifetime is managed via Reference/Release, not delete.
3034  virtual ~Scheduler() {}
3035
3036 public:
3037
3061
// Factory: creates a scheduler configured by _Policy and returns it with
// an initial reference held by the caller.
3062  _CRTIMP static Scheduler * __cdecl Create(const SchedulerPolicy& _Policy);
3063
3070
// Returns the unique identifier of this scheduler.
3071  virtual unsigned int Id() const =0;
3072
3081
// Returns the number of virtual processors of this scheduler.
3082  virtual unsigned int GetNumberOfVirtualProcessors() const =0;
3083
3084
3094
// Returns a copy of the policy this scheduler was created with.
3095  virtual SchedulerPolicy GetPolicy() const =0;
3096
3111
// Increments the reference count; returns the new count.
3112  virtual unsigned int Reference() =0 ;
3113
3126
// Decrements the reference count; returns the new count. The scheduler
// shuts down when the count reaches zero.
3127  virtual unsigned int Release() =0;
3128
3137
// Registers a Windows event handle to be signaled at scheduler shutdown.
3138  virtual void RegisterShutdownEvent(HANDLE _Event) =0;
3139
3155
// Attaches this scheduler to the calling context, making it current.
3156  virtual void Attach() =0;
3157
3175
// Sets the policy used when the runtime lazily creates the default
// scheduler.
3176  _CRTIMP static void __cdecl SetDefaultSchedulerPolicy(const SchedulerPolicy& _Policy);
3177
3189
// Restores the default-scheduler policy to the runtime defaults.
3190  _CRTIMP static void __cdecl ResetDefaultSchedulerPolicy();
3191
3209
// Creates a new schedule group within this scheduler.
3210  virtual ScheduleGroup * CreateScheduleGroup() =0;
3211
3232
// Creates a new schedule group biased toward the given placement.
3233  virtual ScheduleGroup * CreateScheduleGroup(location& _Placement) =0;
3234
3248
// Schedules a light-weight task (function pointer + data).
3249  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0;
3250
3267
// Schedules a light-weight task biased toward the given placement.
3268  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement) =0;
3269
3284
// True if _Placement is a location this scheduler can execute on.
3285  virtual bool IsAvailableLocation(const location& _Placement) const =0;
3286 };
3287 
3304 
// Abstract interface to an execution context (a thread of execution known to
// the scheduler). Static members operate on the calling context; virtuals
// operate on a specific context object. Lifetime is managed by the scheduler
// (protected destructor, privatized delete).
3305 class Context
3306 {
3307 public:
3314
// Returns the unique identifier of this context.
3315  virtual unsigned int GetId() const =0;
3316
3329
// Returns the identifier of the virtual processor this context is
// currently running on.
3330  virtual unsigned int GetVirtualProcessorId() const =0;
3331
3344
// Returns the identifier of the schedule group this context is working on.
3345  virtual unsigned int GetScheduleGroupId() const =0;
3346
3354
// Static: identifier of the calling context.
3355  _CRTIMP static unsigned int __cdecl Id();
3356
3369
// Static: virtual-processor identifier for the calling context.
3370  _CRTIMP static unsigned int __cdecl VirtualProcessorId();
3371
3380
// Static: schedule-group identifier for the calling context.
3381  _CRTIMP static unsigned int __cdecl ScheduleGroupId();
3382
3403
// Static: cooperatively blocks the calling context until it is unblocked.
// Must be balanced by a matching Unblock (see context_unblock_unbalanced).
3404  _CRTIMP static void __cdecl Block();
3405
3426
// Unblocks this context so a pending/future Block can complete. Calling it
// on one's own context throws context_self_unblock.
3427  virtual void Unblock() =0;
3428
3444
// True if this context is synchronously blocked at this moment.
3445  virtual bool IsSynchronouslyBlocked() const =0;
3446
3456
// Runtime-internal: yields the processor briefly (spin-wait assist).
3457  _CRTIMP static void __cdecl _SpinYield();
3458
3469
// Static: cooperatively yields execution to other work.
3470  _CRTIMP static void __cdecl Yield();
3471
3482
// Static: true if the task collection currently executing inline on the
// calling context is being canceled.
3483  _CRTIMP static bool __cdecl IsCurrentTaskCollectionCanceling();
3484
3495
// Static: returns the Context object for the calling thread.
3496  _CRTIMP static Context * __cdecl CurrentContext();
3497
3507
// Static: begins (_BeginOversubscription == true) or ends (false) a region
// where an extra virtual processor is added for the calling context.
3508  _CRTIMP static void __cdecl Oversubscribe(bool _BeginOversubscription);
3509
3510 protected:
3511
3512  //
3513  // Privatize operator delete. The scheduler internally manages contexts.
3514  //
3515  template<class _T> friend void Concurrency::details::_InternalDeleteHelper(_T * _PObject);
3516
// Destructor is protected: contexts are destroyed only by the scheduler.
3517  virtual ~Context() {};
3518 };
3519 
3520 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
3521 
3528 
3530 
3537 
3538 const unsigned int COOPERATIVE_TIMEOUT_INFINITE = (unsigned int)-1;
3539 
3547 
3549 {
3550 public:
3551 
3555 
3557 
3565 
3567 
3579 
3580  _CRTIMP void lock();
3581 
3589 
3590  _CRTIMP bool try_lock();
3591 
3602 
3603  _CRTIMP bool try_lock_for(unsigned int _Timeout);
3604 
3610 
3611  _CRTIMP void unlock();
3612 
3616 
3617  typedef critical_section& native_handle_type;
3618 
3629 
3630  _CRTIMP native_handle_type native_handle();
3631 
3639 
3640  void _Flush_current_owner();
3641 
3654 
3655  bool _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode);
3656 
3660 
3662  {
3663  public:
3664 
3673 
3674  explicit _CRTIMP scoped_lock(critical_section& _Critical_section);
3675 
3680 
3682 
3683  private:
3684 
3685  critical_section& _M_critical_section;
3686  _CONCRT_BUFFER _M_node[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3687 
3688  scoped_lock(const scoped_lock&); // no copy constructor
3689  scoped_lock const & operator=(const scoped_lock&); // no assignment operator
3690  };
3691 
3692 private:
3701 
3702  void _Switch_to_active(void * _PLockingNode);
3703 
3704  _CONCRT_BUFFER _M_activeNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3705  void * volatile _M_pHead;
3706  void * volatile _M_pTail;
3707 
3711 
3713 
3717 
3719 };
3720 
3729 
3731 {
3732 public:
3733 
3737 
3739 
3747 
3749 
3763 
3764  _CRTIMP void lock();
3765 
3773 
3774  _CRTIMP bool try_lock();
3775 
3787 
3788  _CRTIMP void lock_read();
3789 
3797 
3798  _CRTIMP bool try_lock_read();
3799 
3811 
3812  _CRTIMP void unlock();
3813 
3827 
3828  void _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode);
3829 
3833 
3835  {
3836  public:
3844 
3845  explicit _CRTIMP scoped_lock(reader_writer_lock& _Reader_writer_lock);
3846 
3850 
3852 
3853  private:
3854 
3855  reader_writer_lock& _M_reader_writer_lock;
3856  _CONCRT_BUFFER _M_writerNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3857 
3858  scoped_lock(const scoped_lock&); // no copy constructor
3859  scoped_lock const & operator=(const scoped_lock&); // no assignment operator
3860  };
3861 
3865 
3867  {
3868  public:
3877 
3878  explicit _CRTIMP scoped_lock_read(reader_writer_lock& _Reader_writer_lock);
3879 
3883 
3885 
3886  private:
3887 
3889 
3890  scoped_lock_read(const scoped_lock_read&); // no copy constructor
3891  scoped_lock_read const & operator=(const scoped_lock_read&); // no assignment operator
3892  };
3893 
3894 private:
3895 
3903 
3904  bool _Set_next_writer(void * _PWriter);
3905 
3915 
3916  void * _Get_reader_convoy();
3917 
3923 
3924  void _Unlock_writer();
3925 
3930 
3931  void _Unlock_reader();
3932 
3942 
3943  void _Remove_last_writer(void * _PWriter);
3944 
3953 
3954  void _Switch_to_active(void * _PWriter);
3955 
3956  _CONCRT_BUFFER _M_activeWriter[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3960  volatile long _M_lockState;
3961 
3965 
3966  reader_writer_lock (const reader_writer_lock& _Lock);
3967 
3971 
3973 };
3974 
3981 
// A manual-reset event that cooperates with the Concurrency Runtime: waiting
// contexts block cooperatively rather than spinning on an OS handle.
// Non-copyable (copy operations are declared private and not defined).
3982 class event
3983 {
3984 public:
3985
3989
// Constructs the event in the non-signaled state.
3990  _CRTIMP event();
3991
3999
// Destroys the event.
4000  _CRTIMP ~event();
4001
4016
// Waits for the event to become signaled, up to _Timeout milliseconds
// (COOPERATIVE_TIMEOUT_INFINITE waits forever); returns a status code.
4017  _CRTIMP size_t wait(unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
4018
4027
// Signals the event, releasing waiting contexts.
4028  _CRTIMP void set();
4029
4035
// Returns the event to the non-signaled state.
4036  _CRTIMP void reset();
4037
4069
// Waits on _Count events at once; _FWaitAll selects wait-all vs wait-any.
4070  _CRTIMP static size_t __cdecl wait_for_multiple(_In_reads_(_Count) event ** _PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
4071
4072
// Convenience alias for the infinite-timeout sentinel.
4076  static const unsigned int timeout_infinite = COOPERATIVE_TIMEOUT_INFINITE;
4077 private:
4078
4079  // Prevent bad usage of copy-constructor and copy-assignment
4080  event(const event& _Event);
4081  event& operator=(const event& _Event);
4082
// Head of the intrusive chain of contexts blocked on this event.
4083  void * volatile _M_pWaitChain;
4086 };
4087 
4088 namespace details
4089 {
4093 
4095  {
4096  public:
4097 
4101 
4103 
4107 
4109 
4118 
4120 
4132 
4133  _CRTIMP bool wait_for(Concurrency::critical_section& _Lck, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
4134 
4138 
4139  _CRTIMP void notify_one();
4140 
4144 
4145  _CRTIMP void notify_all();
4146 
4147  private:
4148 
4149  // Prevent bad usage of copy-constructor and copy-assignment
4150  _Condition_variable(const _Condition_variable& _Event);
4151  _Condition_variable& operator=(const _Condition_variable& _Event);
4152 
4153  void * volatile _M_pWaitChain;
4155  };
4156 
4157  // Base class for all reference counted objects
4159  {
4160  public:
4161 
4163  {
4165  }
4166 
4167  // Acquires a reference
4168  // Returns the new reference count.
4169  long _Reference()
4170  {
4171  long _Refcount = _InterlockedIncrement(&_M_refCount);
4172 
4173  // 0 - 1 transition is illegal
4174  _CONCRT_ASSERT(_Refcount > 1);
4175  return _Refcount;
4176  }
4177 
4178  // Releases the reference
4179  // Returns the new reference count
4180  long _Release()
4181  {
4182  long _Refcount = _InterlockedDecrement(&_M_refCount);
4183  _CONCRT_ASSERT(_Refcount >= 0);
4184 
4185  if (_Refcount == 0)
4186  {
4187  _Destroy();
4188  }
4189 
4190  return _Refcount;
4191  }
4192 
4193  protected:
4194 
4195  // Allow derived classes to provide their own deleter
4196  virtual void _Destroy()
4197  {
4198  delete this;
4199  }
4200 
4201  // Only allow instantiation through derived class
4202  _RefCounterBase(long _InitialCount = 1) : _M_refCount(_InitialCount)
4203  {
4205  }
4206 
4207  // Reference count
4208  volatile long _M_refCount;
4209  };
4210 
4213 
4214  // This is a non-reentrant lock wrapper around the ConcRT critical-section
4215  // and used by agents/messaging
4217  {
4218  public:
4219 
4220  // Constructor for _NonReentrantPPLLock
4222 
4223  // Acquire the lock, spin if necessary
4224  _CRTIMP void _Acquire(void * _Lock_node);
4225 
4226  // Releases the lock
4227  _CRTIMP void _Release();
4228 
4229  // An exception safe RAII wrapper.
4231  {
4232  public:
4233  // Constructs a holder and acquires the specified lock
4234  _CRTIMP explicit _Scoped_lock(_NonReentrantPPLLock& _Lock);
4235 
4236  // Destroys the holder and releases the lock
4238 
4239  private:
4240  _NonReentrantPPLLock& _M_lock;
4241  _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4242 
4243  _Scoped_lock(const _Scoped_lock&); // no copy constructor
4244  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
4245  };
4246 
4247  private:
4248  // critical_section
4250  };
4251 
4252  // This is a reentrant lock implemented using the ConcRT critical section
4254  {
4255  public:
4256  // Constructor for _ReentrantPPLLock
4258 
4259  // Acquire the lock, spin if necessary
4260  _CRTIMP void _Acquire(void * _Lock_node);
4261 
4262  // Releases the lock
4263  _CRTIMP void _Release();
4264 
4265  // An exception safe RAII wrapper.
4267  {
4268  public:
4269  // Constructs a holder and acquires the specified lock
4270  _CRTIMP explicit _Scoped_lock(_ReentrantPPLLock& _Lock);
4271 
4272  // Destroys the holder and releases the lock
4274 
4275  private:
4276  _ReentrantPPLLock& _M_lock;
4277  _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4278 
4279  _Scoped_lock(const _Scoped_lock&); // no copy constructor
4280  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
4281  };
4282 
4283  private:
4284  // critical_section
4286 
4287  // The number of times this lock has been taken recursively
4289 
4290  // The current owner of the lock
4291  volatile long _M_owner;
4292  };
4293 
4294  struct _Chore
4295  {
4296  protected:
4297  // Constructors.
4298  explicit _Chore(TaskProc _PFunction) : m_pFunction(_PFunction)
4299  {
4300  }
4301 
4303  {
4304  }
4305 
4306  virtual ~_Chore()
4307  {
4308  }
4309 
4310  public:
4311 
4312  // The function which invokes the work of the chore.
4314  };
4315 
4316  // _UnrealizedChore represents an unrealized chore -- a unit of work that scheduled in a work
4317  // stealing capacity. Some higher level construct (language or library) will map atop this to provide
4318  // an usable abstraction to clients.
4319  class _UnrealizedChore : public _Chore, public _AllocBase
4320  {
4321  public:
4322  // Constructor for an unrealized chore.
4325  {
4326  }
4327  virtual ~_UnrealizedChore() {}
4328 
4329 
4330  // Method that executes the unrealized chore.
4331  void _Invoke()
4332  {
4333  _M_pChoreFunction(this);
4334  }
4335 
4336  // Sets the attachment state of the chore at the time of stealing.
4337  void _SetDetached(bool _FDetached);
4338 
4339  // Returns the owning collection of the chore.
4341  {
4342  return _M_pTaskCollection;
4343  }
4344 
4345  // Set flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4346  // The flag is ignored by _StructuredTaskCollection
4347  void _SetRuntimeOwnsLifetime(bool fValue)
4348  {
4349  _M_fRuntimeOwnsLifetime = fValue;
4350  }
4351 
4352  // Returns the flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4353  // The flag is ignored by _StructuredTaskCollection
4355  {
4356  return _M_fRuntimeOwnsLifetime;
4357  }
4358 
4359  // Allocator to be used when runtime owns lifetime.
4360  template <typename _ChoreType, typename _Function>
4361  static _ChoreType * _InternalAlloc(const _Function& _Func)
4362  {
4363  // This is always invoked from the PPL layer by the user and can never be attached to the default scheduler. Therefore '_concrt_new' is not required here
4364  _ChoreType * _Chore = new _ChoreType(_Func);
4365  _Chore->_M_fRuntimeOwnsLifetime = true;
4366  return _Chore;
4367  }
4368 
4369  // Internal helper routine to prepare for execution as a stolen chore.
4370  void _PrepareSteal(ContextBase *_PContext);
4371 
4372  protected:
4373  // Invocation bridge between the _UnrealizedChore and PPL.
4374  template <typename _ChoreType>
4375  static void __cdecl _InvokeBridge(void * _PContext)
4376  {
4377  auto _PChore = static_cast<_ChoreType *>(_PContext);
4378  (*_PChore)();
4379  }
4380 
4381  // Place associated task collection in a safe state.
4383 
4384  private:
4385 
4387  friend class _TaskCollection;
4388  typedef void (__cdecl * CHOREFUNC)(_UnrealizedChore * _PChore);
4389 
4390  // The collection of work to which this particular chore belongs.
4392 
4393  // Internal invocation inside the scheduler.
4395 
4396  // Indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4397  // This flag is ignored by _StructuredTaskCollection
4399 
4400  // An indication of whether the chore (if stolen) was detached.
4402 
4403  // Helper routines
4404  void _PrepareStealStructured(ContextBase *_PContext);
4405  void _PrepareStealUnstructured(ContextBase *_PContext);
4406 
4407  // The internal wrapper around invocation of stolen structured chores.
4408  __declspec(noinline)
4409  static void __cdecl _StructuredChoreWrapper(_UnrealizedChore * _PChore);
4410 
4411  // The internal wrapper around invocation of stolen unstructured chores.
4412  __declspec(noinline)
4413  static void __cdecl _UnstructuredChoreWrapper(_UnrealizedChore * _PChore);
4414 
4415  // To free memory allocated with _InternalAlloc.
4416  static void _InternalFree(_UnrealizedChore * _PChore);
4417 
4418  // Cancellation via token to a stolen chore
4419  static void __cdecl _CancelViaToken(::Concurrency::details::ContextBase *pContext);
4420  };
4421 
4422  // Represents possible results of waiting on a task collection.
4424  {
4428  };
4429 
4430  // _TaskCollectionBase represents an abstract set of work and provides shared waiting semantics for stolen work.
4432  {
4433  public:
4434  // Constructs a new task collection.
4436  _M_pTokenState(NULL),
4437  _M_completedStolenChores(_CollectionNotInitialized),
4438  _M_unpoppedChores(0),
4439  _M_pException(NULL),
4440  _M_inliningDepth(_S_notInlined)
4441  {
4442  }
4443 
4444  // Constructs a new task collection based on a given cancellation token.
4446  _M_pTokenState(_PTokenState),
4447  _M_completedStolenChores(_CollectionNotInitialized),
4448  _M_unpoppedChores(0),
4449  _M_pException(NULL),
4450  _M_inliningDepth(_S_notInlined)
4451  {
4452  }
4453 
4454  // Returns the owning context of the task collection.
4455  void * _OwningContext() const
4456  {
4457  return _M_pOwningContext;
4458  }
4459 
4460  // Returns the inlining depth.
4461  int _InliningDepth() const
4462  {
4463  return _M_inliningDepth;
4464  }
4465 
4466  // Tells if the task collection is inlined - some thread somewhere is currently invoking wait on it.
4467  bool _IsCurrentlyInlined() const
4468  {
4469  return (_M_inliningDepth != _S_notInlined);
4470  }
4471 
4472  // Returns whether this is a structured collection or not.
4474  {
4475  return (_M_inlineFlags & _S_structured) != 0;
4476  }
4477 
4478  // Returns the token state associated with this task collection
4479  _CancellationTokenState *_GetTokenState(_CancellationTokenRegistration **_PRegistration = NULL);
4480 
4481  protected:
4482 
4484  friend class Concurrency::details::ContextBase;
4485 
4487  {
4488  _CollectionNotInitialized = LONG_MIN,
4489  _CollectionInitializationInProgress = LONG_MIN+1,
4490  _CollectionInitialized = 0
4491  };
4492 
4493  // Returns the exception portion of _M_pException.
4494  std::exception_ptr * _Exception() const
4495  {
4496  return (std::exception_ptr *) ((size_t)_M_pException & ~_S_cancelBitsMask);
4497  }
4498 
4499  // Indicates whether or not this task collection has an abnormal exit.
4500  bool _IsAbnormalExit() const
4501  {
4502  return _M_pException != NULL;
4503  }
4504 
4505  // Returns the cancel flags.
4506  size_t _CancelState() const
4507  {
4508  return (size_t) _M_pException & _S_cancelBitsMask;
4509  }
4510 
4511  // Returns whether or not the collection is marked for cancellation.
4513  {
4514  return (_CancelState() & _S_cancelBitsMask) != 0;
4515  }
4516 
4517  // Returns whether an inline cancellation was performed.
4519  {
4520  _CONCRT_ASSERT(_CancelState() != _S_cancelStarted);
4521  return _CancelState() == _S_cancelShotdownOwner;
4522  }
4523 
4525  {
4526  _CONCRT_ASSERT(_CancelState() != _S_cancelStarted);
4527  return _CancelState() == _S_cancelDeferredShootdownOwner;
4528  }
4529 
4530  // Returns the parent collection safely.
4532  {
4533  return ((_M_inliningDepth != _S_notInlined) ? _M_pParent : NULL);
4534  }
4535 
4536  // Called in order to determine whether this task collection will interrupt for a pending cancellation at or above it.
4537  bool _WillInterruptForPendingCancel();
4538 
4539  // Called when an exception is raised on a chore on a given task collection, this makes a determination of what to do with the exception
4540  // and saves it for potential transport back to the thread performing a join on a chore collection.
4541  void _RaisedException();
4542 
4543  // Potentially rethrows the exception which was set with _RaisedException. The caller has responsibility to ensure that _RaisedException
4544  // was called prior to calling this and that _M_pException has progressed beyond the _S_nonNull state.
4545  void _RethrowException();
4546 
4547  // Marks the collection for cancellation and returns whether the collection was marked.
4548  bool _MarkCancellation();
4549 
4550  // Finishes the cancellation state (changing from _S_cancelStarted to one of the other states). Note that only the
4551  // thread which successfully marked cancellation can call this.
4552  void _FinishCancelState(size_t _NewCancelState);
4553 
4554  // Called when a cancellation is raised on a chore on a given task collection. This makes a determination of what to do with the exception
4555  // and saves it for potential transport back to the thread performing a join on a chore collection. Note that every other exception
4556  // has precedence over a cancellation.
4557  void _RaisedCancel();
4558 
4559  // Tracks the parent collection. (For example, A task collection B created during execution of a chore C on task collection A is
4560  // considered a child of A).
4562 
4563  // Tracks the inlining depth of this collection for cancellation purposes and packs a series of definition bits.
4564  int _M_inliningDepth : 28;
4565  int _M_inlineFlags : 4;
4566 
4567  // The cancellation token for the task collection.
4569 
4570  // The context which owns the task collection. This is the context where the collection is created.
4572 
4573  // The number of unpopped chores associated with the task collection (set by the derived
4574  // class during chore association.
4576 
4577  // The number of stolen chores executed so far.
4579 
4580  // The stored exception which has been marshaled from the thread a stolen chore ran upon to the thread that is waiting on the
4581  // task collection.
4582  //
4583  // The lower two bits of _M_pException are utilized for the cancellation state machine. The upper 30 are the exception pointer. This implies
4584  // that the exception pointer must be 4-byte aligned. Because of intermediate states, the exception pointer cannot be between 0x8 and 0xF. The heap should
4585  // not be allocating such...
4586  //
4587  std::exception_ptr * _M_pException;
4588 
4589  // Cancellation states
4590  static const size_t _S_cancelBitsMask = 0x3;
4591  static const size_t _S_cancelNone = 0x0;
4592  static const size_t _S_cancelStarted = 0x1;
4593  static const size_t _S_cancelDeferredShootdownOwner = 0x2;
4594  static const size_t _S_cancelShotdownOwner = 0x3;
4595 
4596  // Intermediate exceptions.
4597  static const size_t _S_nonNull = 0x8;
4598  static const size_t _S_cancelException = 0xC;
4599 
4600  // initialization state for inlining depth.
4601  static const int _S_notInlined = -1;
4602 
4603  // Inline flags.
4604  static const int _S_structured = 0x00000001;
4605  static const int _S_localCancel = 0x00000002;
4606  static const int _S_reserved = 0x0000000C;
4607 
4608  private:
4609 
4610  // Prevent bad usage of copy-constructor and copy-assignment
4611  _TaskCollectionBase(const _TaskCollectionBase& _Collection);
4612  _TaskCollectionBase& operator=(const _TaskCollectionBase& _Collection);
4613  };
4614 
4619 
4621  {
4622  public:
4623 
4627 
4629  {
4630  _Construct();
4631  _M_pTokenState = NULL;
4632  }
4633 
4640 
4642 
4649 
4651 
4666 
4667  _CRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation);
4668 
4679 
4680  _CRTIMP void _Schedule(_UnrealizedChore * _PChore);
4681 
4685 
4686  _CRTIMP void _Cancel();
4687 
4698 
4699  _CRTIMP bool _IsCanceling();
4700 
4714 
4715  _CRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL);
4716 
4726 
4728  {
4729  return _RunAndWait();
4730  }
4731 
4735 
4736  void _CancelStolenContexts();
4737 
4738  private:
4739 
4740  friend class _UnrealizedChore;
4741 
4742  void _Construct()
4743  {
4744  _M_pOwningContext = NULL;
4745  _M_inlineFlags = _S_structured;
4746  }
4747 
4751 
4752  _CRTIMP void _Abort();
4753 
4757  _CRTIMP void _CleanupToken();
4758 
4762 
4764  {
4765  //
4766  // Users are required to call Wait() before letting the destructor run. Otherwise, throw. Note that before throwing,
4767  // we must actually wait on the tasks because they contain pointers into stack frames and unwinding without the wait is
4768  // instant stack corruption.
4769  //
4770  if (_M_unpoppedChores > 0)
4771  {
4772  _Abort();
4773 
4774  if (!__uncaught_exception())
4775  {
4776  return false;
4777  }
4778  }
4779 
4780  return true;
4781  }
4782 
4786 
4787  void _Initialize();
4788 
4795 
4796  void _WaitOnStolenChores(long _StolenChoreCount);
4797 
4801 
4802  void _CountUp();
4803 
4808 
4809  static void __cdecl _CancelViaToken(_StructuredTaskCollection *pCollection);
4810 
4811  //
 4812  // _StructuredTaskCollection::_M_event is used to construct a structured event object only when it is needed to block. The structured event object
4813  // has no state to cleanup, therefore no dtor code is required.
4814  //
4815  _CONCRT_BUFFER _M_event[(sizeof(void*) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4816  };
4817 
4824 
4826  {
4827  public:
4828 
4832 
4834 
4841 
4843 
4850 
4851  _CRTIMP ~_TaskCollection();
4852 
4867 
4868  _CRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation);
4869 
4880 
4881  _CRTIMP void _Schedule(_UnrealizedChore * _PChore);
4882 
4886 
4887  _CRTIMP void _Cancel();
4888 
4899 
4900  _CRTIMP bool _IsCanceling();
4901 
4916 
4917  _CRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL);
4918 
4929 
4931  {
4932  return _RunAndWait();
4933  }
4934 
4938 
4939  bool _IsMarkedForAbnormalExit() const;
4940 
4944 
4945  _TaskCollection * _OriginalCollection() const;
4946 
4950 
4951  bool _IsAlias() const;
4952 
4962 
4963  void _RegisterCompletionHandler(TaskProc _Func, void * _PCompletionContext);
4964 
4965  private:
4966 
4967  friend class _UnrealizedChore;
4968  friend class Concurrency::details::ContextBase;
4969 
4974 
4975  bool _IsStaleAlias() const;
4976 
4980 
4981  void _ReleaseAlias();
4982 
4992 
4993  _TaskCollection(_TaskCollection * _POriginCollection, bool _FDirectAlias);
4994 
4998 
4999  _TaskCollection * _Alias();
5000 
5007 
5008  void _Abort(bool fLeaveCanceled = false);
5009 
5013 
5014  bool _IsIndirectAlias() const;
5015 
5019 
5020  bool _IsDirectAlias() const;
5021 
5025 
5026  bool _HasDirectAlias() const;
5027 
5038 
5039  void _Cancel(bool _InsideException, _TaskCollection * _PSnapPoint);
5040 
5044 
5045  void _NotifyNewChore();
5046 
5053 
5054  void _NotifyCompletedChoreAndFree(_UnrealizedChore * _PChore = NULL);
5055 
5062 
5063  void _FullAliasWait(_TaskCollection * _PSnapPoint);
5064 
5071 
5072  void _Reset(_TaskCollection * _PSnapPoint);
5073 
5079 
5080  void _RaisedException();
5081 
5087 
5088  void _RaisedCancel();
5089 
5099 
5100  bool _SetCancelState(long _Status);
5101 
5109 
5110  void _CancelFromArbitraryThread(bool _InsideException);
5111 
5122 
5123  void _CancelDirectAliases(bool _InsideException, _TaskCollection * _PSnapPoint);
5124 
5137 
5138  void _CancelStolenContexts(bool _InsideException, bool _FInlineGated);
5139 
5143 
5144  void *_GetStealTrackingList() const;
5145 
5149 
5150  void _Initialize();
5151 
5158 
5159  void _AbortiveSweep(void *_PCtx);
5160 
5173 
5174  static bool __cdecl _CollectionMatchPredicate(_UnrealizedChore *_PChore, void *_PData);
5175 
5188 
5189  static bool __cdecl _SweepAbortedChore(_UnrealizedChore *_PChore, void *_PData);
5190 
5197 
5198  bool _TaskCleanup(bool fExceptional);
5199 
5203 
5204  static void __cdecl _CancelViaToken(_TaskCollection *pCollection);
5205 
5210 
5211  _CONCRT_BUFFER _M_stealTracker[_SAFERWLIST_SIZE];
5212 
5217 
5219 
5224 
5225  volatile long _M_exitCode;
5226 
5230 
5231  volatile long _M_executionStatus;
5232 
5236 
5237  event _M_event;
5238 
5239  _TaskCollection * _M_pOriginalCollection;
5240  _TaskCollection * _M_pNextAlias;
5242 
5243  int _M_taskCookies[2];
5244 
5245  volatile long _M_flags;
5246  volatile long _M_chaining;
5247 
5250 
5253  };
5254 
5271  {
5272  // Disable inline scheduling
5274  // Let runtime decide whether to do inline scheduling or not
5276  // Always do inline scheduling
5278  };
5279 
5285  {
5286  public:
5287  _StackGuard() : _Depth(_GetCurrentInlineDepth())
5288  {
5289  // _Depth is the reference to the depth slot on context.
5290  ++_Depth;
5291  }
5293  {
5294  // _Depth is the reference to the depth slot on context.
5295  --_Depth;
5296  }
5297 
5298  bool _ShouldInline(_TaskInliningMode _InliningMode) const
5299  {
5300  // As _TaskInliningMode is defined as inlining threshold, we can directly convert
5301  // it into size_t, and compare with current context inlining depth.
5302  return _Depth <= static_cast<size_t>(_InliningMode);
5303  }
5304  private:
5305  size_t & _Depth;
5306  _StackGuard & operator =(const _StackGuard &);
5307 
5313  _CRTIMP static size_t & __cdecl _GetCurrentInlineDepth();
5314  };
5315 
5322  {
5323  public:
5324 
5334  _CRTIMP static _AsyncTaskCollection * __cdecl _NewCollection(_CancellationTokenState *_PTokenState);
5335 
5354  {
5355  _CONCRT_ASSERT(_PChore);
5356  _Reference();
5357 
5358  if (_InliningMode == _NoInline)
5359  {
5360  _M_taskCollection._Schedule(_PChore);
5361  return _NotComplete;
5362  }
5363  else
5364  {
5365  _StackGuard _Guard;
5366  if (_Guard._ShouldInline(_InliningMode))
5367  {
5368  return _M_taskCollection._RunAndWait(_PChore);
5369  }
5370  else
5371  {
5372  _M_taskCollection._Schedule(_PChore);
5373  return _NotComplete;
5374  }
5375  }
5376  }
5377 
5381  void _Cancel()
5382  {
5383  _M_taskCollection._Cancel();
5384  }
5385 
5400  {
5401  // Note that _Guard is NOT unused variable, the constructor and destructor will be called to maintain inline depth.
5402  _StackGuard _Guard;
5403  return _M_taskCollection._RunAndWait();
5404  }
5405 
5406  private:
5407 
5408  void _NotificationHandler();
5409 
5410  _CRTIMP virtual void _Destroy();
5411 
5412  // Private constructor
5414 
5415  __declspec(noinline)
5416  static void __cdecl _CompletionHandler(void * _PCompletionContext);
5417 
5418  private:
5419 
5420  // Underlying task collection where the chore is scheduled to run
5421  _TaskCollection _M_taskCollection;
5422  };
5423 
5428  {
5429  volatile long _M_signals;
5430  };
5431 
5432  typedef void (__cdecl * _UnobservedExceptionHandler)(void);
5434 
5435  // Used to report unobserved task exceptions in ppltasks.h
5436  _CRTIMP void __cdecl _ReportUnobservedException();
5437 
5448  {
5449  public:
5450 
5452 
5453  _CRTIMP ~_Cancellation_beacon();
5454 
5455  bool _Is_signaled() const
5456  {
5457  return (_M_pRef->_M_signals != 0);
5458  }
5459 
5460  // This method should only be called when the beacon is signaled. It confirms whether a cancellation is indeed happening and that the beacon
5461  // was not flagged due to a false positive race. If the cancellation is not confirmed, the beacon is lowered.
5462  _CRTIMP bool _Confirm_cancel();
5463 
5464  void _Raise()
5465  {
5466  _InterlockedIncrement(&_M_pRef->_M_signals);
5467  }
5468 
5469  void _Lower()
5470  {
5471  _InterlockedDecrement(&_M_pRef->_M_signals);
5472  }
5473 
5474  private:
5475 
5477 
5478  };
5479 
5480  //
5481  // Internal stub class.
5482  //
5483  class _TimerStub;
5484 
5485  //
5486  // Internal wrapper around timers in order to allow timer messaging blocks to share implementation with internal ConcRT runtime
5487  // timers.
5488  //
5489  class _Timer
5490  {
5491  protected:
5492  // Constructs a new timer.
5493  //
5494  // _Ms: The duration and period of the timer in milliseconds.
5495  // _FRepeating: An indication of whether the timer is repeating (periodic) or not.
5496  _CRTIMP _Timer(unsigned int _Ms, bool _FRepeating);
5497 
5498  // Destroys the timer.
5499  _CRTIMP virtual ~_Timer();
5500 
5501  // Starts the timer.
5502  _CRTIMP void _Start();
5503 
5504  // Stops the timer.
5505  _CRTIMP void _Stop();
5506 
5507  private:
5508  friend class _TimerStub;
5509 
5510  // Called when the timer fires.
5511  virtual void _Fire() =0;
5512 
5513  // The actual timer
5514  HANDLE _M_hTimer;
5515 
5516  // The duration and period of the timer.
5517  unsigned int _M_ms;
5518 
5519  // Whether the timer is repeating (periodic by _M_ms)
5521  };
5522 
5523  //
5524  // Internal runtime structure that holds the trace flags and level for ETW events
 5525  // provided by the Concurrency Runtime.
5526  //
5528  {
5529  volatile unsigned long EnableFlags; // Determines which class of events to log
 5530  volatile unsigned char EnableLevel; // Determines the severity of events to log
5531 
5532  void _EnableTrace(unsigned char level, unsigned long flags)
5533  {
5534  EnableFlags = flags;
5535  EnableLevel = level;
5536  }
5537 
5539  {
5540  EnableLevel = 0;
5541  EnableFlags = 0;
5542  }
5543 
5544  bool _IsEnabled(unsigned char level, unsigned long flags) const
5545  {
5546  return ((level <= EnableLevel) && ((EnableFlags & flags) == flags));
5547  }
5548  };
5549 
5554 
5555  _CRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo();
5556 
5560 
5562 
5566 
5568 
5569 } // namespace details
5570 
5571 
5578 
5579 __declspec(deprecated("Concurrency::EnableTracing is a deprecated function.")) _CRTIMP HRESULT __cdecl EnableTracing();
5580 
5588 
5589 __declspec(deprecated("Concurrency::DisableTracing is a deprecated function.")) _CRTIMP HRESULT __cdecl DisableTracing();
5590 
5594 
5596 {
5600 
5605 
5610 
5615 
5620 
5625 
5630 
5635 
5640 
5642 };
5643 
5644 // Common trace header structure for all ConcRT diagnostic events
5645 // struct CONCRT_TRACE_EVENT_HEADER_COMMON
5646 // {
5647 // EVENT_TRACE_HEADER header;
5648 // DWORD VirtualProcessorID;
5649 // DWORD SchedulerID;
5650 // DWORD ContextID;
5651 // DWORD ScheduleGroupID;
5652 // };
5653 
5657 
5658 extern "C" const __declspec(selectany) GUID ConcRT_ProviderGuid = { 0xF7B697A3, 0x4DB5, 0x4d3b, { 0xBE, 0x71, 0xC4, 0xD2, 0x84, 0xE6, 0x59, 0x2F } };
5659 
5660 //
5661 // GUIDS for events
5662 //
5663 
5670 
5671 extern "C" const __declspec(selectany) GUID ConcRTEventGuid = { 0x72B14A7D, 0x704C, 0x423e, { 0x92, 0xF8, 0x7E, 0x6D, 0x64, 0xBC, 0xB9, 0x2A } };
5672 
5678 
5679 extern "C" const __declspec(selectany) GUID SchedulerEventGuid = { 0xE2091F8A, 0x1E0A, 0x4731, { 0x84, 0xA2, 0x0D, 0xD5, 0x7C, 0x8A, 0x52, 0x61 } };
5680 
5688 
5689 extern "C" const __declspec(selectany) GUID ScheduleGroupEventGuid = { 0xE8A3BF1F, 0xA86B, 0x4390, { 0x9C, 0x60, 0x53, 0x90, 0xB9, 0x69, 0xD2, 0x2C } };
5690 
5695 
5696 extern "C" const __declspec(selectany) GUID ContextEventGuid = { 0x5727A00F, 0x50BE, 0x4519, { 0x82, 0x56, 0xF7, 0x69, 0x98, 0x71, 0xFE, 0xCB } };
5697 
5706 
5707 extern "C" const __declspec(selectany) GUID ChoreEventGuid = { 0x7E854EC7, 0xCDC4, 0x405a, { 0xB5, 0xB2, 0xAA, 0xF7, 0xC9, 0xE7, 0xD4, 0x0C } };
5708 
5712 
5713 extern "C" const __declspec(selectany) GUID VirtualProcessorEventGuid = { 0x2f27805f, 0x1676, 0x4ecc, { 0x96, 0xfa, 0x7e, 0xb0, 0x9d, 0x44, 0x30, 0x2f } };
5714 
5723 
5724 extern "C" const __declspec(selectany) GUID LockEventGuid = { 0x79A60DC6, 0x5FC8, 0x4952, { 0xA4, 0x1C, 0x11, 0x63, 0xAE, 0xEC, 0x5E, 0xB8 } };
5725 
5733 
5734 extern "C" const __declspec(selectany) GUID ResourceManagerEventGuid = { 0x2718D25B, 0x5BF5, 0x4479, { 0x8E, 0x88, 0xBA, 0xBC, 0x64, 0xBD, 0xBF, 0xCA } };
5735 
5741 
5742 extern "C" const __declspec(selectany) GUID PPLParallelInvokeEventGuid = { 0xd1b5b133, 0xec3d, 0x49f4, { 0x98, 0xa3, 0x46, 0x4d, 0x1a, 0x9e, 0x46, 0x82 } };
5743 
5749 
5750 extern "C" const __declspec(selectany) GUID PPLParallelForEventGuid = { 0x31c8da6b, 0x6165, 0x4042, { 0x8b, 0x92, 0x94, 0x9e, 0x31, 0x5f, 0x4d, 0x84 } };
5751 
5757 
5758 extern "C" const __declspec(selectany) GUID PPLParallelForeachEventGuid = { 0x5cb7d785, 0x9d66, 0x465d, { 0xba, 0xe1, 0x46, 0x11, 0x6, 0x1b, 0x54, 0x34 } };
5759 
5763 
5764 extern "C" const __declspec(selectany) GUID AgentEventGuid = {0xb9b5b78c, 0x713, 0x4898, { 0xa2, 0x1a, 0xc6, 0x79, 0x49, 0xdc, 0xed, 0x7 } };
5765 
5766 // Trace an event signaling a parallel function
5767 _CRTIMP void __cdecl _Trace_ppl_function(const GUID& _Guid, unsigned char _Level, ConcRT_EventType _Type);
5768 
5772 
5774 {
5781 
5782  AllEventsFlag = 0xFFFFFFFF
5783 };
5784 
5788 
5790 {
5794 
5796 
5800 
5802 
5806 
5808 
5812 
5814 
5818 
5820 
5824 
5826 
5830 
5832 
5836 
5838 
5839 };
5840 
5841 // // Common trace payload for agents
5842 //
5843 // struct AGENTS_TRACE_PAYLOAD
5844 // {
5845 // // Identifier of the agent or message block that is emitting the event
5846 // __int64 AgentId1;
5847 // union
5848 // {
5849 // // The identifier of a target block for link/unlink event
5850 // __int64 AgentId2;
5851 //
5852 // // Count of messages processed for the end event
5853 // long Count;
5854 //
5855 // // Name of this agent for the purposes of the ETW trace
5856 // wchar_t Name[32];
5857 // };
5858 // };
5859 
5860 // Emit a trace event specific to the agents library of the given type and payload
5861 _CRTIMP void __cdecl _Trace_agents(Agents_EventType _Type, __int64 agentId, ...);
5862 }
5863 
5864 namespace concurrency = Concurrency;
5865 
5866 #pragma pop_macro("new")
5867 #pragma pack(pop)
friend class _TaskCollection
Definition: concrt.h:4387
_CRTIMP improper_lock()
Constructs an improper_lock exception.
DWORD _M_boundQueueId
Definition: concrt.h:5248
_Scoped_lock const & operator=(const _Scoped_lock &)
void operator=(const _SpinLock &)
A cancellation beacon is a flag which can be polled in an inlinable fashion using the is_signaled met...
Definition: concrt.h:5447
std::exception_ptr * _Exception() const
Definition: concrt.h:4494
_CRTIMP bool try_lock()
Attempts to acquire the reader-writer lock as a writer without blocking.
int _InliningDepth() const
Definition: concrt.h:4461
Definition: concrt.h:378
void _Raise()
Definition: concrt.h:5464
_CRTIMP _Scoped_lock(_NonReentrantPPLLock &_Lock)
void _PrepareStealStructured(ContextBase *_PContext)
void _Assign(const location &_Rhs)
Assigns _Rhs to this location.
Definition: concrt.h:2075
_CRTIMP unsupported_os()
Constructs an unsupported_os object.
_CRTIMP ~reader_writer_lock()
Destroys the reader_writer_lock object.
void * _M_pReaderHead
Definition: concrt.h:3957
void _AddNode(_ElemType *_Elem)
Definition: concrt.h:1137
bool _ShouldInline(_TaskInliningMode _InliningMode) const
Definition: concrt.h:5298
Definition: functional:256
long _Reference()
Definition: concrt.h:4169
This class describes an exception thrown when an attempt is made to set the concurrency limits of a S...
Definition: concrt.h:1678
_ReaderWriterLock & _M_lock
Definition: concrt.h:1028
_YieldFunction _M_yieldFunction
Definition: concrt.h:779
Agents_EventType
The types of events that can be traced using the tracing functionality offered by the Agents Library ...
Definition: concrt.h:5789
_SpinWait(_YieldFunction _YieldMethod=_UnderlyingYield)
Construct a spin wait object
Definition: concrt.h:614
_CRTIMP invalid_operation()
Constructs an invalid_operation object.
This class describes an exception thrown when an invalid operation is performed that is not more accu...
Definition: concrt.h:1705
An event type used for miscellaneous events.
Definition: concrt.h:5601
static void __cdecl _Initialize()
_CRTIMP _In_ int _Value
Definition: setjmp.h:190
Indicates that the location represents a particular NUMA node.
Definition: concrt.h:2026
An event type that marks the beginning of a start/end event pair.
Definition: concrt.h:5611
size_t _M_ElemsConstructed
Definition: concrt.h:1112
_NonReentrantPPLLock & _M_lock
Definition: concrt.h:4240
_CRTIMP invalid_scheduler_policy_thread_specification()
Constructs an invalid_scheduler_policy_thread_specification object.
Concurrency::details::_TaskCollectionBase * _OwningCollection() const
Definition: concrt.h:4340
Definition: concrt.h:555
critical_section & native_handle_type
A reference to a critical_section object.
Definition: concrt.h:3617
_CRTIMP size_t wait(unsigned int _Timeout=COOPERATIVE_TIMEOUT_INFINITE)
Waits for the event to become signaled.
Structured task collections represent groups of work which follow a strictly LIFO ordered paradigm qu...
Definition: concrt.h:4620
void * _M_pOwningContext
Definition: concrt.h:4571
Async Task collections is a thin wrapper over task collection to cater to the execution of asynchrono...
Definition: concrt.h:5321
This class describes an exception thrown when a lock is acquired improperly.
Definition: concrt.h:1851
static _CRTIMP void __cdecl _ScheduleTask(TaskProc _Proc, void *_Data)
long _M_activeStealersForCancellation
A count of active stealers for CANCELLATION PURPOSES ONLY. This is non-interlocked and guarded by the...
Definition: concrt.h:5218
An event type that represents the linking of message blocks
Definition: concrt.h:5825
This class describes an exception thrown when an invalid or unknown key is passed to a SchedulerPolic...
Definition: concrt.h:1622
static _Ty _LoadWithAquire(volatile _Ty &_Location)
Definition: concrt.h:427
void _Construct()
Definition: concrt.h:4742
Definition: concrt.h:390
An event type that represents the unlinking of message blocks
Definition: concrt.h:5831
volatile long _M_owner
Definition: concrt.h:4291
An event type that represents the act of a attaching to a scheduler.
Definition: concrt.h:5636
_Scoped_lock const & operator=(const _Scoped_lock &)
_CRTIMP unsigned int _Release()
void _DisableTrace()
Definition: concrt.h:5538
#define _CRTIMP
Definition: crtdefs.h:23
_CRTIMP message_not_found()
Constructs a message_not_found object.
Implements busy wait with no backoff
Definition: concrt.h:604
bool operator!=(const location &_Rhs) const
Determines whether two location objects represent different location.
Definition: concrt.h:1995
TaskProc m_pFunction
Definition: concrt.h:4313
_CRTIMP default_scheduler_exists()
Constructs a default_scheduler_exists object.
#define _CONCRT_ASSERT(x)
Definition: concrt.h:137
Definition: concrt.h:4426
Definition: concrt.h:5277
_TaskCollectionBase * _M_pParent
Definition: concrt.h:4561
_Ty _FetchAndAdd(_Ty _Addend)
Definition: concrt.h:515
_TaskCollectionBase(_CancellationTokenState *_PTokenState)
Definition: concrt.h:4445
_CRTIMP invalid_oversubscribe_operation()
Constructs an invalid_oversubscribe_operation object.
bool _IsMarkedForCancellation() const
Definition: concrt.h:4512
_CRTIMP bool wait_for(Concurrency::critical_section &_Lck, unsigned int _Timeout=COOPERATIVE_TIMEOUT_INFINITE)
Waits for the _Condition_variable to become signaled. The lock argument passed in is unlocked by the ...
static _CRTIMP location __cdecl current()
Returns a location object representing the most specific place the calling thread is executing...
unsigned int _M_ms
Definition: concrt.h:5517
_CRTIMP ~event()
Destroys an event.
Definition: concrt.h:4158
_UnrealizedChore()
Definition: concrt.h:4323
T * _As() const
Returns the internal binding as a specified object.
Definition: concrt.h:2100
Indicates that the location represents a particular scheduling node.
Definition: concrt.h:2031
_W64 unsigned int size_t
Definition: crtdefs.h:496
This class describes an exception thrown when the Reference method is called on a Scheduler object th...
Definition: concrt.h:1393
_Ty operator=(_Ty _Rhs)
Definition: concrt.h:506
Definition: concrt.h:5779
void(__cdecl * CHOREFUNC)(_UnrealizedChore *_PChore)
Definition: concrt.h:4388
_CRTIMP void lock()
Acquires the reader-writer lock as a writer.
void *volatile _M_pHead
Definition: concrt.h:3705
bool _HasWriteLock() const
Definition: concrt.h:978
volatile long & _M_flag
Definition: concrt.h:558
static void __cdecl _WaitEquals(volatile const long &_Location, long _Value, long _Mask=0xFFFFFFFF)
void _Unlock_reader()
Called from unlock() when a reader is holding the lock. Reader count is decremented and if this is th...
_CRTIMP task_canceled()
Constructs a task_canceled object.
_TaskCollectionStatus _RunAndWait()
A cancellation friendly wrapper with which to execute _PChore and then waits for all chores running i...
Definition: concrt.h:5399
_CRTIMP context_self_unblock()
Constructs a context_self_unblock object.
typedef __success(return >=0) long HRESULT
CHOREFUNC _M_pChoreFunction
Definition: concrt.h:4394
This class describes an exception thrown when calls to the Block and Unblock methods of a Context obj...
Definition: concrt.h:1452
This class describes an exception thrown when a messaging block is given a pointer to a target which ...
Definition: concrt.h:1543
void(__cdecl * TaskProc)(void *)
Concurrency::details contains definitions of support routines in the public namespaces and one or mor...
Definition: concrt.h:265
This class describes an exception thrown by the PPL tasks layer in order to force the current task to...
Definition: concrt.h:1879
void(__cdecl * _UnobservedExceptionHandler)(void)
Definition: concrt.h:5432
_CONCRT_BUFFER _M_criticalSection[(4 *sizeof(void *)+2 *sizeof(long)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:835
scoped_lock const & operator=(const scoped_lock &)
size_t _CancelState() const
Definition: concrt.h:4506
An event type that represents the creation of an object
Definition: concrt.h:5795
virtual ~_RefCounterBase()
Definition: concrt.h:4162
RAII wrapper used to maintain and limit ppltask maximum inline schedule depth. This class will keep a...
Definition: concrt.h:5284
long _M_unpoppedChores
Definition: concrt.h:4575
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
ConcRT_EventType
The types of events that can be traced using the tracing functionality offered by the Concurrency Run...
Definition: concrt.h:5595
void * _M_pTaskExtension
Definition: concrt.h:5241
_TaskCollectionStatus _ScheduleWithAutoInline(_UnrealizedChore *_PChore, _TaskInliningMode _InliningMode)
Schedule a chore with automatic inlining. The chore is pushed onto the associated workstealing queue...
Definition: concrt.h:5353
An event type that represents the act of unblocking a context.
Definition: concrt.h:5621
Definition: concrt.h:4427
long _M_recursionCount
Definition: concrt.h:882
_Scoped_lock(_NonReentrantBlockingLock &_Lock)
Definition: concrt.h:918
critical_section & _M_critical_section
Definition: concrt.h:3685
_CRTIMP event()
Constructs a new event.
volatile unsigned long EnableFlags
Definition: concrt.h:5529
_SpinWait< 0 > _SpinWaitNoYield
Definition: concrt.h:783
_CRTIMP void lock()
Acquires this critical section.
#define _Post_invalid_
Definition: sal.h:701
Concrt_TraceFlags
Trace flags for the event types
Definition: concrt.h:5773
void _IncrementConstructedElemsCount()
Definition: concrt.h:1095
~location()
Destroys a location object.
Definition: concrt.h:1972
volatile long _M_signals
Definition: concrt.h:5429
_MallocaListHolder & operator=(const _MallocaListHolder &)
virtual ~_Chore()
Definition: concrt.h:4306
_CRTIMP unsigned int _Reference()
_ElemNodeType & operator=(const _ElemNodeType &)
_At_(this->_M_FirstNode, _Pre_valid_) virtual ~_MallocaListHolder()
Definition: concrt.h:1156
typedef void(__cdecl *_se_translator_function)(unsigned int
Definition: concrt.h:5778
_CRTIMP improper_scheduler_attach()
Constructs an improper_scheduler_attach object.
HRESULT _Hresult
Definition: concrt.h:1245
long _M_recursionCount
Definition: concrt.h:4288
static _Ty _Decrement(volatile _Ty &_Location)
Definition: concrt.h:449
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:42
Definition: concrt.h:4294
_CRTIMP invalid_multiple_scheduling()
Constructs an invalid_multiple_scheduling object.
#define _Pre_valid_
Definition: sal.h:677
location(const location &_Src)
Constructs a location object.
Definition: concrt.h:1925
_ElemType * _InitOnRawMalloca(void *_MallocaRet)
Definition: concrt.h:1085
_CONCRT_BUFFER _M_criticalSection[(4 *sizeof(void *)+2 *sizeof(long)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:938
_CRTIMP void reset()
Resets the event to a non-signaled state.
_CRTIMP void __cdecl Free(_Pre_maybenull_ _Post_invalid_ void *_PAllocation)
Releases a block of memory previously allocated by the Alloc method to the Concurrency Runtime Cachin...
A writer-preference queue-based reader-writer lock with local only spinning. The lock grants first in...
Definition: concrt.h:3730
void _Construct(_Ty1 *_Ptr, _Ty2 &&_Val)
Definition: xmemory0:37
volatile long _M_exitCode
An indication of the exit code of the chore. Anything non-zero here indicates cancellation of one for...
Definition: concrt.h:5225
_CRTIMP _Context(::Concurrency::Context *_PContext=NULL)
Definition: concrt.h:381
_CRTIMP scheduler_resource_allocation_error(_In_z_ const char *_Message, HRESULT _Hresult)
Constructs a scheduler_resource_allocation_error object.
This class describes an exception thrown because of a failure to acquire a critical resource in the C...
Definition: concrt.h:1211
_CRTIMP _SpinLock(volatile long &_Flag)
::Concurrency::Scheduler * _M_pScheduler
Definition: concrt.h:399
void _InternalDeleteHelper(_T *_PObject)
Definition: concrt.h:286
Definition: concrt.h:843
bool _PerformedInlineCancel() const
Definition: concrt.h:4518
#define NULL
Definition: crtdbg.h:30
_MallocaArrayHolder()
Definition: concrt.h:1071
void _Lower()
Definition: concrt.h:5469
_CRTIMP Concurrency::Scheduler * _GetScheduler()
Definition: concrt.h:396
scoped_lock_read const & operator=(const scoped_lock_read &)
volatile long _M_flags
Definition: concrt.h:5245
unsigned __int64 * PDWORD_PTR
Definition: concrt.h:117
Indicates that the location represents a particular execution resource.
Definition: concrt.h:2036
Definition: concrt.h:5273
Concurrency::critical_section _M_lock
Definition: concrt.h:4085
_SpinWait _SpinWaitBackoffNone
Definition: concrt.h:782
void _PrepareStealUnstructured(ContextBase *_PContext)
An event type that represents the name for an object
Definition: concrt.h:5837
void *volatile _M_pTail
Definition: concrt.h:3706
unsigned int _M_reserved
Definition: concrt.h:2138
Definition: concrt.h:497
_TaskCollectionStatus _Wait()
Waits for all chores running in the _StructuredTaskCollection to finish (normally or abnormally)...
Definition: concrt.h:4727
void * _M_pCompletionContext
Definition: concrt.h:5252
_CRTIMP void lock_read()
Acquires the reader-writer lock as a reader. If there are writers, active readers have to wait until ...
_CRTIMP context_unblock_unbalanced()
Constructs a context_unblock_unbalanced object.
void _SetRuntimeOwnsLifetime(bool fValue)
Definition: concrt.h:4347
_CONCRT_BUFFER _M_node[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3686
A non-reentrant mutex which is explicitly aware of the Concurrency Runtime.
Definition: concrt.h:3548
_TaskCollectionStatus _Wait()
Waits for all chores running in the _TaskCollection to finish (normally or abnormally). This method encapsulates all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of it tasks (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _TaskCollection was created, and re-thrown). After this function returns, the _TaskCollection cannot be used for scheduling further work.
Definition: concrt.h:4930
void _Switch_to_active(void *_PWriter)
The writer node allocated on the stack never really owns the lock because it would go out of scope an...
location & operator=(const location &_Rhs)
Assigns the contents of a different location object to this one.
Definition: concrt.h:1962
std::exception_ptr * _M_pException
Definition: concrt.h:4587
bool _SpinOnce()
Spins for one time quantum, until a maximum spin is reached.
Definition: concrt.h:652
__declspec(noinline) static void __cdecl _StructuredChoreWrapper(_UnrealizedChore *_PChore)
_GROUP_AFFINITY * PGROUP_AFFINITY
Definition: concrt.h:52
_Condition_variable & operator=(const _Condition_variable &_Event)
_CONCRT_BUFFER _M_lockNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:4277
int _M_stackPos
Definition: concrt.h:5249
bool _M_fRepeating
Definition: concrt.h:5520
void _Reset()
Resets the counts and state to the default.
Definition: concrt.h:741
#define _Pre_maybenull_
Definition: sal.h:687
_CRTIMP void wait(Concurrency::critical_section &_Lck)
Waits for the _Condition_variable to become signaled. The lock argument passed in is unlocked by the ...
An event type that represents the act of a detaching from a scheduler.
Definition: concrt.h:5641
void * _CONCRT_BUFFER
Definition: concrt.h:143
_Scoped_lock(_ReaderWriterLock &_Lock)
Definition: concrt.h:991
_CRTIMP _Scheduler(::Concurrency::Scheduler *_PScheduler=NULL)
Definition: concrt.h:393
_CRTIMP ~_Condition_variable()
Destroys a _Condition_variable.
void _YieldProcessor()
Definition: concrt.h:76
_CRTIMP void set()
Signals the event.
_Scoped_lock(_ReentrantBlockingLock &_Lock)
Definition: concrt.h:815
static void __cdecl _CancelViaToken(::Concurrency::details::ContextBase *pContext)
void _Destroy(_Ty *_Ptr)
Definition: xmemory0:53
An event type that represents the conclusion of some processing
Definition: concrt.h:5807
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
#define _In_z_
Definition: sal.h:319
bool _IsCurrentlyInlined() const
Definition: concrt.h:4467
Definition: concrt.h:5775
void _Switch_to_active(void *_PLockingNode)
The node allocated on the stack never really owns the lock because it would go out of scope and the i...
_Beacon_reference * _M_pRef
Definition: concrt.h:5476
This class describes an exception thrown when the Attach method is called on a Scheduler object which...
Definition: concrt.h:1339
static _Ty _CompareAndSwap(volatile _Ty &_Location, _Ty _NewValue, _Ty _Comperand)
Definition: concrt.h:434
#define _Inout_opt_
Definition: sal.h:385
This class describes an exception thrown when an operation is performed which requires a scheduler to...
Definition: concrt.h:1313
void _PrepareSteal(ContextBase *_PContext)
_CRTIMP operation_timed_out()
Constructs an operation_timed_out object.
This class describes an exception thrown when an operation has timed out.
Definition: concrt.h:1762
This class describes an exception thrown when there are tasks still scheduled to a task_group or stru...
Definition: concrt.h:1514
HANDLE _M_hTimer
Definition: concrt.h:5514
_TaskInliningMode
The enum defines inlining scheduling policy for ppltasks. Scheduling a chore or a functor with _TaskI...
Definition: concrt.h:5270
_SpinState _M_state
Definition: concrt.h:778
bool _FastNodeIntersects(const location &_Rhs) const
Determines whether two locations have an intersection. This is a fast intersection which avoids certa...
_Ty operator++(int)
Definition: concrt.h:523
_CRTIMP void __cdecl _Trace_ppl_function(const GUID &_Guid, unsigned char _Level, ConcRT_EventType _Type)
static unsigned int _S_spinCount
Definition: concrt.h:583
This class describes an exception thrown when a task_handle object is scheduled multiple times using ...
Definition: concrt.h:1796
_CRTIMP void unlock()
Unlocks the critical section.
bool _Is_signaled() const
Definition: concrt.h:5455
critical_section & operator=(const critical_section &)
Hide assignment operator for a critical section
bool _IsEnabled(unsigned char level, unsigned long flags) const
Definition: concrt.h:5544
_CRTIMP scheduler_worker_creation_error(_In_z_ const char *_Message, HRESULT _Hresult)
Constructs a scheduler_worker_creation_error object.
_ElemNodeType * _M_Next
Definition: concrt.h:1173
#define _SAFERWLIST_SIZE
Definition: concrt.h:145
Definition: concrt.h:4425
friend class _StructuredTaskCollection
Definition: concrt.h:4386
bool _FastVPIntersects(const location &_Rhs) const
Determines whether two locations have an intersection. This is a fast intersection which avoids certa...
unsigned long _M_currentYield
Definition: concrt.h:777
void _RegisterConcRTEventTracing()
Register ConcRT as an ETW Event Provider.
size_t _GetAllocationSize() const
Definition: concrt.h:1127
reader_writer_lock & _M_reader_writer_lock
Definition: concrt.h:3855
_CRTIMP native_handle_type native_handle()
Returns a platform specific native handle, if one exists.
void _Remove_last_writer(void *_PWriter)
When the last writer leaves the lock, it needs to reset the tail to NULL so that the next coming writ...
_CRTIMP bool _IsSynchronouslyBlocked() const
_SECURITY_ATTRIBUTES * LPSECURITY_ATTRIBUTES
Definition: concrt.h:49
_NonReentrantBlockingLock & _M_lock
Definition: concrt.h:929
bool _TaskCleanup()
Performs task cleanup normally done at destruction time.
Definition: concrt.h:4763
bool _ShouldSpinAgain()
Determines whether maximum spin has been reached
Definition: concrt.h:771
This class describes an exception thrown when a messaging block is unable to find a requested message...
Definition: concrt.h:1567
_CRTIMP invalid_scheduler_policy_value()
Constructs an invalid_scheduler_policy_value object.
static _CRTIMP void __cdecl _Yield()
Definition: concrt.h:5489
_In_reads_(_N) const wchar_t *_S2
_CONCRT_BUFFER _M_writerNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3856
void *volatile _M_pWaitChain
Definition: concrt.h:4153
_CRTIMP HRESULT get_error_code() const
Returns the error code that caused the exception.
_CRTIMP _Scoped_lock(_ReentrantPPLLock &_Lock)
_RefCounterBase(long _InitialCount=1)
Definition: concrt.h:4202
_Type _GetType() const
Returns the type which this location object represents.
Definition: concrt.h:2118
_CRTIMP reader_writer_lock()
Constructs a new reader_writer_lock object.
void _Flush_current_owner()
Guarantees that if any context holds the lock at the time the method is called, that context has rele...
_ReentrantBlockingLock & _M_lock
Definition: concrt.h:826
static _CRTIMP unsigned int __cdecl _Value()
An event type that marks the beginning of a start/end event pair.
Definition: concrt.h:5606
Definition: pplcancellation_token.h:104
_CRTIMP improper_scheduler_reference()
Constructs an improper_scheduler_reference object.
_Ty operator--(int)
Definition: concrt.h:531
unsigned int _GetId() const
Returns the ID which this location object represents.
Definition: concrt.h:2109
_TaskCollectionBase()
Definition: concrt.h:4435
An event type that represents the scheduling of a process
Definition: concrt.h:5819
Concurrency::details::_TaskCollectionBase * _M_pTaskCollection
Definition: concrt.h:4391
_CRTIMP void *__cdecl Alloc(size_t _NumBytes)
Allocates a block of memory of the size specified from the Concurrency Runtime Caching Suballocator...
void _Unlock_writer()
Called from unlock() when a writer is holding the lock. Writer unblocks the next writer in the list a...
static _CRTIMP location __cdecl _Current_node()
Returns a location representing the scheduling node that the calling thread is executing.
#define LONG_MIN
Definition: limits.h:42
Concurrency::critical_section _M_criticalSection
Definition: concrt.h:4249
_Ty operator+=(_Ty _Addend)
Definition: concrt.h:535
unsigned long _M_currentSpin
Definition: concrt.h:776
Definition: concrt.h:5780
bool _GetRuntimeOwnsLifetime() const
Definition: concrt.h:4354
TaskProc _M_completionHandler
Definition: concrt.h:5251
_TaskCollectionStatus
Definition: concrt.h:4423
_ReaderWriterLock & _M_lock
Definition: concrt.h:1004
bool _PerformedPendingCancel() const
Definition: concrt.h:4524
_CRTIMP critical_section()
Constructs a new critical section.
_TaskCollectionBaseState
Definition: concrt.h:4486
_CancellationTokenState * _M_pTokenState
Definition: concrt.h:4568
void _CRTIMP __cdecl _UnderlyingYield()
Default method for yielding during a spin wait
reader_writer_lock & operator=(const reader_writer_lock &_Lock)
Hide assignment operator for a reader_writer_lock
static _CRTIMP _Scheduler __cdecl _Get()
_CRTIMP void __cdecl _ReportUnobservedException()
unsigned __int64 DWORD_PTR
Definition: concrt.h:117
_CRTIMP bool try_lock()
Tries to acquire the lock without blocking.
_CRTIMP invalid_scheduler_policy_key()
Constructs an invalid_scheduler_policy_key object.
const unsigned int COOPERATIVE_TIMEOUT_INFINITE
Value indicating that a wait should never time out.
Definition: concrt.h:3538
_Chore(TaskProc _PFunction)
Definition: concrt.h:4298
unsigned long DWORD
Definition: concrt.h:57
_ElemType * _M_ElemArray
Definition: concrt.h:1111
This class describes an exception thrown when the Unblock method of a Context object is called from t...
Definition: concrt.h:1478
_CRTIMP scoped_lock(critical_section &_Critical_section)
Constructs a scoped_lock object and acquires the critical_section object passed in the _Critical_sect...
This class describes an exception thrown because of a failure to create a worker execution context in...
Definition: concrt.h:1258
~_StackGuard()
Definition: concrt.h:5292
_CRTIMP nested_scheduler_missing_detach()
Constructs a nested_scheduler_missing_detach object.
bool _M_fDetached
Definition: concrt.h:4401
An event type that represents the initiation of some processing
Definition: concrt.h:5801
void * _M_pBinding
Definition: concrt.h:2155
An event type that represents the deletion of an object
Definition: concrt.h:5813
Task collections represent groups of work which step outside the strict structuring of the _Structure...
Definition: concrt.h:4825
_CRTIMP void __cdecl wait(unsigned int _Milliseconds)
Pauses the current context for a specified amount of time.
unsigned int _GetBindingId() const
Gets the binding ID for this location.
Definition: concrt.h:2127
volatile long _M_completedStolenChores
Definition: concrt.h:4578
unsigned int _CRTIMP __cdecl _GetConcurrency()
Returns the hardware concurrency available to the Concurrency Runtime, taking into account process af...
void _UnregisterConcRTEventTracing()
Unregister ConcRT as an ETW Event Provider.
_CRTIMP ~scoped_lock()
Destroys a reader_writer_lock object and releases the lock supplied in its constructor.
An event type that represents the act of a context becoming idle.
Definition: concrt.h:5631
::Concurrency::Context * _M_pContext
Definition: concrt.h:387
_In_ wctype_t _Type
Definition: ctype.h:205
unsigned int _M_type
Definition: concrt.h:2135
An abstraction of a physical location on hardware.
Definition: concrt.h:1902
Concurrency::critical_section _M_lock
Definition: concrt.h:4154
_CRTIMP bool try_lock_for(unsigned int _Timeout)
Tries to acquire the lock without blocking for a specific number of milliseconds. ...
This class describes an exception thrown when the CurrentScheduler::Detach method is called on a cont...
Definition: concrt.h:1366
#define _W64
Definition: crtdefs.h:100
_Scoped_lock const & operator=(const _Scoped_lock &)
_Scoped_lock(_ReentrantLock &_Lock)
Definition: concrt.h:864
_Scoped_lock const & operator=(const _Scoped_lock &)
_ReentrantLock & _M_lock
Definition: concrt.h:875
bool _Set_next_writer(void *_PWriter)
Called for the first context in the writer queue. It sets the queue head and it tries to claim the lo...
_CONCRT_BUFFER _M_activeNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3704
_TaskCollection * _M_pNextAlias
Definition: concrt.h:5240
void _Invoke()
Definition: concrt.h:4331
_Scoped_lock const & operator=(const _Scoped_lock &)
static _Ty _Increment(volatile _Ty &_Location)
Definition: concrt.h:444
bool _Is_system() const
Internal routine that tells whether a location represents the "system location". This indicates no sp...
Definition: concrt.h:2090
static _CRTIMP void __cdecl _Oversubscribe(bool _BeginOversubscription)
An event type that represents the act of a context yielding.
Definition: concrt.h:5626
_Scoped_lock const & operator=(const _Scoped_lock &)
bool _Acquire_lock(void *_PLockingNode, bool _FHasExternalNode)
Acquires this critical section given a specific node to lock.
void _Acquire_lock(void *_PLockingNode, bool _FHasExternalNode)
Acquires a write lock given a specific write node to lock.
_CRTIMP void __cdecl _SetUnobservedExceptionHandler(_UnobservedExceptionHandler)
This class describes an exception thrown when the Concurrency Runtime detects that you neglected to c...
Definition: concrt.h:1739
_ElemType * _AddRawMallocaNode(void *_MallocaRet)
Definition: concrt.h:1147
void _Cancel()
Cancels work on the task collection.
Definition: concrt.h:5381
This class describes an exception thrown when an unsupported operating system is used.
Definition: concrt.h:1287
location()
Constructs a location object.
Definition: concrt.h:1913
_CRTIMP bad_target()
Constructs a bad_target object.
bool _M_fRuntimeOwnsLifetime
Definition: concrt.h:4398
_CRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo()
Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW...
An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a write...
Definition: concrt.h:3834
#define _T(x)
Definition: tchar.h:2498
_CRTIMP improper_scheduler_detach()
Constructs an improper_scheduler_detach object.
reader_writer_lock & _M_reader_writer_lock
Definition: concrt.h:3888
_StackGuard()
Definition: concrt.h:5287
_CRTIMP ~scoped_lock()
Destroys a scoped_lock object and releases the critical section supplied in its constructor.
void * _M_ptr
Definition: concrt.h:2150
_CRTIMP scheduler_not_attached()
Constructs a scheduler_not_attached object.
_CRTIMP _Condition_variable()
Constructs a new _Condition_variable.
static const unsigned int timeout_infinite
Value indicating that a wait should never time out.
Definition: concrt.h:4076
_CRTIMP void _Acquire(void *_Lock_node)
volatile long _M_executionStatus
The status of the task collection.
Definition: concrt.h:5231
static void _StoreWithRelease(volatile _Ty &_Location, _Ty _Rhs)
Definition: concrt.h:420
Concurrency::critical_section _M_criticalSection
Definition: concrt.h:4285
_TaskCollectionBase * _SafeGetParent()
Definition: concrt.h:4531
_MallocaArrayHolder & operator=(const _MallocaArrayHolder &)
static void _InternalFree(_UnrealizedChore *_PChore)
_CRTIMP bool __cdecl __uncaught_exception()
Definition: pplcancellation_token.h:212
volatile unsigned char EnableLevel
Definition: concrt.h:5530
virtual ~_MallocaArrayHolder()
Definition: concrt.h:1101
volatile long _M_numberOfWriters
Definition: concrt.h:1053
bool _IsStructured()
Definition: concrt.h:4473
long __cdecl _InterlockedDecrement(long volatile *)
_TaskCollection * _M_pOriginalCollection
Definition: concrt.h:5239
void * _Get_reader_convoy()
Called when writers are done with the lock, or when lock was free for claiming by the first reader co...
void * _M_pResetChain
Definition: concrt.h:4084
_Type
Describes the type of the given location.
Definition: concrt.h:2016
_CRTIMP ~scoped_lock_read()
Destroys a scoped_lock_read object and releases the lock supplied in its constructor.
void _DoYield()
Yields its time slice using the specified yieldFunciton
Definition: concrt.h:723
long _Release()
Definition: concrt.h:4180
_Diff _Count
Definition: algorithm:1941
Definition: concrt.h:293
volatile long _M_chaining
Definition: concrt.h:5246
_Ty operator++()
Definition: concrt.h:519
Indicates that the location represents the "system location". This has no specific affinity...
Definition: concrt.h:2021
An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a reade...
Definition: concrt.h:3866
void *volatile _M_pWaitChain
Definition: concrt.h:4083
void * _M_pWriterHead
Definition: concrt.h:3958
_CRTIMP void _Acquire(void *_Lock_node)
size_t & _Depth
Definition: concrt.h:5305
unsigned long _NumberOfSpins()
Determines the current spin count
Definition: concrt.h:759
_ReentrantPPLLock & _M_lock
Definition: concrt.h:4276
_CRTIMP void unlock()
Unlocks the reader-writer lock based on who locked it, reader or writer.
Definition: amprt.h:312
volatile long _M_refCount
Definition: concrt.h:4208
This class describes an exception thrown when the Scheduler::SetDefaultSchedulerPolicy method is call...
Definition: concrt.h:1419
volatile _Ty _M_value
Definition: concrt.h:499
_ElemNodeType * _M_FirstNode
Definition: concrt.h:1182
void _Initialize(_ElemType *_Elem)
Definition: concrt.h:1074
_Chore()
Definition: concrt.h:4302
A manual reset event which is explicitly aware of the Concurrency Runtime.
Definition: concrt.h:3982
_CRT_MANAGED_FP_DEPRECATE _In_ unsigned int _Mask
Definition: float.h:120
virtual ~_UnrealizedChore()
Definition: concrt.h:4327
void _SetSpinCount(unsigned int _Count)
Set a dynamic spin count.
Definition: concrt.h:624
long __cdecl _InterlockedIncrement(long volatile *)
unsigned int _M_id
Definition: concrt.h:2147
static _Ty _FetchAndAdd(volatile _Ty &_Location, _Ty _Addend)
Definition: concrt.h:439
This class describes an exception thrown when a policy key of a SchedulerPolicy object is set to an i...
Definition: concrt.h:1650
void _EnableTrace(unsigned char level, unsigned long flags)
Definition: concrt.h:5532
_Scoped_lock_read(_ReaderWriterLock &_Lock)
Definition: concrt.h:1015
An exception safe RAII wrapper for a critical_section object.
Definition: concrt.h:3661
event & operator=(const event &_Event)
const size_t COOPERATIVE_WAIT_TIMEOUT
Value indicating that a wait timed out.
Definition: concrt.h:3529
bool _IsAbnormalExit() const
Definition: concrt.h:4500
_CRTIMP missing_wait()
Constructs a missing_wait object.
bool operator==(const location &_Rhs) const
Determines whether two location objects represent the same location.
Definition: concrt.h:1983
#define SIZE_MAX
Definition: limits.h:81
_CRTIMP void notify_one()
Notify a single waiter of the _Condition_variable.
Internal maintainence structure for beacons.
Definition: concrt.h:5427
Definition: concrt.h:5782
volatile long _M_state
Definition: concrt.h:1048
_Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand)
Definition: concrt.h:511
_Check_return_ _In_ long _Size
Definition: io.h:325
_CRTIMP void __cdecl _Trace_agents(Agents_EventType _Type, __int64 agentId,...)
An event type that represents the act of a context blocking.
Definition: concrt.h:5616
static _CRTIMP _Context __cdecl _CurrentContext()
_CRTIMP bool try_lock_read()
Attempts to acquire the reader-writer lock as a reader without blocking.
unsigned int _M_bindingId
Definition: concrt.h:2141
_StructuredTaskCollection()
Construct a new structured task collection.
Definition: concrt.h:4628
static _CRTIMP size_t __cdecl wait_for_multiple(_In_reads_(_Count) event **_PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout=COOPERATIVE_TIMEOUT_INFINITE)
Waits for multiple events to become signaled.
scoped_lock const & operator=(const scoped_lock &)
_CONCRT_BUFFER _M_activeWriter[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3956
long __cdecl _InterlockedCompareExchange(long volatile *, long, long)
_MallocaListHolder()
Definition: concrt.h:1132
static void __cdecl _InvokeBridge(void *_PContext)
Definition: concrt.h:4375
virtual void _Destroy()
Definition: concrt.h:4196
void * _M_pWriterTail
Definition: concrt.h:3959
_CRTIMP void notify_all()
Notify all the waiters of the _Condition_variable.
char * va_list
Definition: crtdefs.h:550
_Scoped_lock_read const & operator=(const _Scoped_lock_read &)
_CONCRT_BUFFER _M_lockNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:4241
void * HANDLE
Definition: concrt.h:66
static _CRTIMP unsigned int __cdecl _Id()
event _M_event
An event on which to wait for stolen chores to complete.
Definition: concrt.h:5237
Definition: concrt.h:5776
static _CRTIMP unsigned int __cdecl _GetNumberOfVirtualProcessors()
_CRTIMP scoped_lock(reader_writer_lock &_Reader_writer_lock)
Constructs a scoped_lock object and acquires the reader_writer_lock object passed in the _Reader_writ...
Definition: concrt.h:573
_CRTIMP ~critical_section()
Destroys a critical section.
_CRTIMP scoped_lock_read(reader_writer_lock &_Reader_writer_lock)
Constructs a scoped_lock_read object and acquires the reader_writer_lock object passed in the _Reader...
volatile long _M_lockState
Definition: concrt.h:3960
A _Condition_variable which is explicitly aware of the Concurrency Runtime.
Definition: concrt.h:4094
volatile long _M_owner
Definition: concrt.h:883
void * _OwningContext() const
Definition: concrt.h:4455
static _ChoreType * _InternalAlloc(const _Function &_Func)
Definition: concrt.h:4361
This class describes an exception thrown when the Context::Oversubscribe method is called with the _B...
Definition: concrt.h:1822
_SpinState
State of the spin wait class.
Definition: concrt.h:710
_Ty operator--()
Definition: concrt.h:527