STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
concrt.h
Go to the documentation of this file.
1 /***
2 * ==++==
3 *
4 * Copyright (c) Microsoft Corporation. All rights reserved.
5 *
6 * ==--==
7 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
8 *
9 * concrt.h
10 *
11 * Main public header file for ConcRT. This is the only header file a C++ program must include to use the core concurrency runtime features.
12 *
13 * The Agents And Message Blocks Library and the Parallel Patterns Library (PPL) are defined in separate header files.
14 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
15 ****/
16 
17 #pragma once
18 
19 #include <crtdefs.h>
20 
21 #if defined(BUILD_WINDOWS)
22 #error Must not be included during CRT build with _CRT_WINDOWS flag enabled
23 #endif
24 
25 #if !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM) || defined (_M_ARM64))
26  #error ERROR: Concurrency Runtime is supported only on X64, X86, ARM, and ARM64 architectures.
27 #endif /* !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM) || defined (_M_ARM64)) */
28 
29 #if defined (_M_CEE)
30  #error ERROR: Concurrency Runtime is not supported when compiling /clr.
31 #endif /* defined (_M_CEE) */
32 
33 #ifndef __cplusplus
34  #error ERROR: Concurrency Runtime is supported only for C++.
35 #endif /* __cplusplus */
36 
37 #define _CONCRT_H
38 
39 #include <exception>
40 #include <sal.h>
41 #include <limits.h>
42 #include <crtdbg.h>
43 #include <guiddef.h>
44 #include <intrin.h>
45 #include <new>
46 
47 #include <pplinterface.h>
48 
49 #pragma pack(push,_CRT_PACKING)
50 #pragma push_macro("new")
51 #undef new
52 
53 // Forward declare structs needed from Windows header files
54 
55 struct _SECURITY_ATTRIBUTES;
56 typedef _SECURITY_ATTRIBUTES* LPSECURITY_ATTRIBUTES;
57 
58 struct _GROUP_AFFINITY;
59 typedef _GROUP_AFFINITY* PGROUP_AFFINITY;
60 
61 // Define essential types needed from Windows header files
62 
63 typedef unsigned long DWORD;
64 #ifndef _HRESULT_DEFINED
65 #define _HRESULT_DEFINED
66 #ifdef __midl
67 typedef LONG HRESULT;
68 #else /* __midl */
69 typedef _Return_type_success_(return >= 0) long HRESULT;
70 #endif /* __midl */
71 #endif /* _HRESULT_DEFINED */
72 typedef void * HANDLE;
73 
74 #pragma push_macro("_YieldProcessor")
75 #undef _YieldProcessor
76 
77 #if (defined (_M_IX86) || defined (_M_X64))
78 #define _YieldProcessor _mm_pause
79 #else /* (defined (_M_IX86) || defined (_M_X64)) */
80 inline void _YieldProcessor() {}
81 #endif /* (defined (_M_IX86) || defined (_M_X64)) */
82 
83 #if (defined (_M_IX86) || defined (_M_ARM))
84 
85 #define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile *>(_Target)))
86 #define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile *>(_Target)))
87 #define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange( \
88  reinterpret_cast<long volatile *>(_Target), \
89  static_cast<long>(_Exchange), \
90  static_cast<long>(_Comparand)))
91 
92 typedef unsigned long DWORD_PTR, *PDWORD_PTR;
93 
94 #else /* (defined (_M_IX86) || defined (_M_ARM)) */
95 
96 #define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile *>(_Target)))
97 #define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile *>(_Target)))
98 #define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange64( \
99  reinterpret_cast<__int64 volatile *>(_Target), \
100  static_cast<__int64>(_Exchange), \
101  static_cast<__int64>(_Comparand)))
102 
103 typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR;
104 
105 #endif /* (defined (_M_IX86) || defined (_M_ARM)) */
106 
107 #ifdef _DEBUG
108 #ifdef _MSC_VER
109 // Turn off compiler warnings that are exacerbated by constructs in this
110 // file's definitions:
111 
112 // Warning C4127: conditional expression is constant. This is caused by
113 // the macros with "do { ... } while (false)" syntax. The syntax is
114 // a good way to ensure that a statement-like macro can be used in all
115 // contexts (specifically if statements), but the compiler warns about
116 // the "while (false)" part.
117 
118 #define _CONCRT_ASSERT(x) __pragma (warning (suppress: 4127)) do {_ASSERTE(x); __assume(x);} while(false)
119 #else /* _MSC_VER */
120 #define _CONCRT_ASSERT(x) do {_ASSERTE(x); __assume(x);} while(false)
121 #endif /* _MSC_VER */
122 #else /* _DEBUG */
123 #define _CONCRT_ASSERT(x) __assume(x)
124 #endif /* _DEBUG */
125 
126 // Used internally to represent the smallest unit in which to allocate hidden types
127 
128 
129 typedef void * _CONCRT_BUFFER;
130 #define _LISTENTRY_SIZE ((2 * sizeof(void *) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))
131 #define _SAFERWLIST_SIZE ((3 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))
132 
137 
138 namespace Concurrency
139 {
153 
// Cooperatively pauses the current context for at least _Milliseconds.
154 _CONCRTIMP void __cdecl wait(unsigned int _Milliseconds);
155 
170 
// Allocates _NumBytes through the runtime's internal allocator.
171 _CONCRTIMP void * __cdecl Alloc(size_t _NumBytes);
172 
185 
// Releases memory obtained from Concurrency::Alloc; the SAL annotation
// (_Pre_maybenull_) documents that a null pointer is accepted.
186 _CONCRTIMP void __cdecl Free(_Pre_maybenull_ _Post_invalid_ void * _PAllocation);
187 
192 
193 
194 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
195 
217 
// Restricts execution resources via a process affinity mask (desktop apps only).
218 _CONCRTIMP void __cdecl set_task_execution_resources(DWORD_PTR _ProcessAffinityMask);
219 
241 
// Overload taking an array of _Count GROUP_AFFINITY entries (desktop apps only).
242 _CONCRTIMP void __cdecl set_task_execution_resources(unsigned short _Count, PGROUP_AFFINITY _PGroupAffinity);
243 
244 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
245 
250 
// Signature of a work item scheduled on the runtime: a __cdecl function
// taking a single opaque data pointer.
251 typedef void (__cdecl * TaskProc)(void *);
252 
253 //
254 // Forward declarations:
255 //
256 class Scheduler;
257 class ScheduleGroup;
258 class Context;
259 
260 namespace details
261 {
262  //
263  // Forward declarations:
264  //
265  class ContextBase;
266  class _TaskCollectionBase;
267 
268  //
269  // A utility to hide operator delete from certain objects while still allowing the runtime to delete them internally.
270  //
// Funnel through which the runtime destroys objects whose operator delete
// is hidden from external callers: the delete-expression is evaluated here,
// inside the runtime's own code, with ordinary access rules.
template<class _Ty>
void _InternalDeleteHelper(_Ty * _PObject)
{
    delete _PObject; // runs ~_Ty() and returns the storage
}
276 
277  // The purpose of the class is solely to direct allocations of ConcRT classes
278  // through a single point, using an internal allocator.
279  struct _AllocBase
280  {
281  // Standard operator new
282  void * operator new(size_t _Size)
283  {
     // NOTE(review): the extraction dropped original line 284 here; per the
     // no-throw overload below this presumably returns
     // ::Concurrency::Alloc(_Size) -- confirm against the shipped header.
285  }
286 
287  // Standard operator delete
288  void operator delete(void * _Ptr) throw()
289  {
290  ::Concurrency::Free(_Ptr);
291  }
292 
293  // Standard operator new, no-throw version
294  void * operator new(size_t _Size, const std::nothrow_t&) throw()
295  {
296  void * _Ptr;
297 
     // Map any allocation failure (exception from Alloc) to a NULL return,
     // as the no-throw contract requires.
298  try
299  {
300  _Ptr = ::Concurrency::Alloc(_Size);
301  }
302  catch(...)
303  {
304  _Ptr = NULL;
305  }
306 
307  return (_Ptr);
308  }
309 
310  // Standard operator delete, no-throw version
311  void operator delete(void * _Ptr, const std::nothrow_t&) throw()
312  {
313  operator delete(_Ptr);
314  }
315 
316  // Standard operator new array
317  void * operator new[](size_t _Size)
318  {
319  return operator new(_Size);
320  }
321 
322  // Standard operator delete array
323  void operator delete[](void * _Ptr) throw()
324  {
325  operator delete(_Ptr);
326  }
327 
328  // Standard operator new array, no-throw version
329  void * operator new[](size_t _Size, const std::nothrow_t& _No_throw) throw ()
330  {
331  return operator new(_Size, _No_throw);
332  }
333 
334  // Standard operator delete array, no-throw version
335  void operator delete[](void * _Ptr, const std::nothrow_t& _No_throw) throw()
336  {
337  operator delete(_Ptr, _No_throw);
338  }
339 
340  // Standard operator new with void* placement
341  void * operator new(size_t, void * _Location) throw()
342  {
343  return _Location;
344  }
345 
346  // Standard operator delete with void* placement
347  void operator delete(void *, void *) throw()
348  {
349  }
350 
351  // Standard operator new array with void* placement
352  void * __cdecl operator new[](size_t, void * _Location) throw()
353  {
354  return _Location;
355  }
356 
357  // Standard operator delete array with void* placement
358  void __cdecl operator delete[](void *, void *) throw()
359  {
360  }
361  };
362 
363  // Stubs to allow the header files to access runtime functionality for WINAPI_PARTITION apps.
     // Lightweight stub over ::Concurrency::Context that lets header code
     // (including WINAPI_PARTITION apps, per the comment above) reach
     // context functionality exported from the runtime DLL.
364  class _Context
365  {
366  public:
     // Wraps an existing runtime context; default-constructs to a NULL wrapper.
367  _CONCRTIMP _Context(::Concurrency::Context * _PContext = NULL) : _M_pContext(_PContext) {}
     // Returns a stub for the context executing on the calling thread.
368  _CONCRTIMP static _Context __cdecl _CurrentContext();
369  _CONCRTIMP static void __cdecl _Yield();
370  _CONCRTIMP static void __cdecl _Oversubscribe(bool _BeginOversubscription);
371  _CONCRTIMP bool _IsSynchronouslyBlocked() const;
372  private:
373  ::Concurrency::Context * _M_pContext;
374  };
375 
     // NOTE(review): the extraction dropped original line 376 (the class
     // header). Usage below (_Scheduler returned by _Get() at line 394)
     // shows this is "class _Scheduler" -- a stub wrapper over
     // ::Concurrency::Scheduler exposing reference counting.
377  {
378  public:
379  _CONCRTIMP _Scheduler(::Concurrency::Scheduler * _PScheduler = NULL) : _M_pScheduler(_PScheduler) {}
     // Increment / decrement the wrapped scheduler's reference count;
     // both return the resulting count.
380  _CONCRTIMP unsigned int _Reference();
381  _CONCRTIMP unsigned int _Release();
382  _CONCRTIMP ::Concurrency::Scheduler * _GetScheduler() { return _M_pScheduler; }
383 
384  private:
385  ::Concurrency::Scheduler * _M_pScheduler;
386  };
387 
     // NOTE(review): the extraction dropped original line 388 (the class
     // header); presumably "class _CurrentScheduler" -- a static-only stub
     // for operations on the scheduler attached to the calling thread.
     // Confirm against the shipped concrt.h.
389  {
390  public:
     // Queues a TaskProc with its opaque data pointer onto the current scheduler.
391  _CONCRTIMP static void __cdecl _ScheduleTask(TaskProc _Proc, void * _Data);
392  _CONCRTIMP static unsigned int __cdecl _Id();
393  _CONCRTIMP static unsigned int __cdecl _GetNumberOfVirtualProcessors();
394  _CONCRTIMP static _Scheduler __cdecl _Get();
395  };
396 
397  //
398  // Wrappers for atomic access
399  //
     // Primary template is intentionally empty: any type whose size is not
     // specialized below (4, and 8 on _WIN64) fails to compile when used.
400  template <size_t _Size>
401  struct _Subatomic_impl { };
402 
     // 4-byte specialization: maps each operation onto the 32-bit
     // _Interlocked* compiler intrinsics (<intrin.h> is included above).
403  template<>
404  struct _Subatomic_impl<4> {
405  template <typename _Ty>
406  static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs) {
407  // For the compiler, a volatile write has release semantics. In addition, on ARM,
408  // the volatile write will emit a data memory barrier before the write.
409  _Location = _Rhs;
410  }
411 
     // (sic: "Aquire" -- the misspelling is part of the interface and must
     // match all call sites.)
412  template <typename _Ty>
413  static _Ty _LoadWithAquire(volatile _Ty& _Location) {
414  // For the compiler, a volatile read has acquire semantics. In addition, on ARM,
415  // the volatile read will emit a data memory barrier after the read.
416  return _Location;
417  }
418 
     // Returns the value previously at _Location (interlocked semantics).
419  template <typename _Ty>
420  static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand) {
421  return (_Ty)_InterlockedCompareExchange((volatile long*)&_Location, (long)_NewValue, (long)_Comperand);
422  }
423 
     // Returns the value prior to the addition.
424  template <typename _Ty>
425  static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend) {
426  return (_Ty)_InterlockedExchangeAdd((volatile long*)&_Location, (long)_Addend);
427  }
428 
     // _Increment/_Decrement return the NEW value (intrinsic semantics).
429  template <typename _Ty>
430  static _Ty _Increment(volatile _Ty& _Location) {
431  return (_Ty)_InterlockedIncrement((volatile long*)&_Location);
432  }
433 
434  template <typename _Ty>
435  static _Ty _Decrement(volatile _Ty& _Location) {
436  return (_Ty)_InterlockedDecrement((volatile long*)&_Location);
437  }
438  };
439 
440 #if defined (_WIN64)
     // 8-byte specialization, only meaningful on 64-bit targets: same shape
     // as the 4-byte specialization but using the 64-bit intrinsics.
441  template<>
442  struct _Subatomic_impl<8> {
443  template <typename _Ty>
444  static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs) {
445  // For the compiler, a volatile write has release semantics.
446  _Location = _Rhs;
447  }
448 
449  template <typename _Ty>
450  static _Ty _LoadWithAquire(volatile _Ty& _Location) {
451  // For the compiler, a volatile read has acquire semantics.
452  return _Location;
453  }
454 
455  template <typename _Ty>
456  static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand) {
457  return (_Ty)_InterlockedCompareExchange64((volatile __int64*)&_Location, (__int64)_NewValue, (__int64)_Comperand);
458  }
459 
460  template <typename _Ty>
461  static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend) {
462  return (_Ty)_InterlockedExchangeAdd64((volatile __int64*)&_Location, (__int64)_Addend);
463  }
464 
465  template <typename _Ty>
466  static _Ty _Increment(volatile _Ty& _Location) {
467  return (_Ty)_InterlockedIncrement64((volatile __int64*)&_Location);
468  }
469 
470  template <typename _Ty>
471  static _Ty _Decrement(volatile _Ty& _Location) {
472  return (_Ty)_InterlockedDecrement64((volatile __int64*)&_Location);
473  }
474  };
475 #endif /* defined (_WIN64) */
476 
477 
478  //
479  // Wrapper for atomic access. Only works for 4-byte or 8-byte types (for example, int, long, long long, size_t, pointer).
480  // Anything else might fail to compile.
481  //
     // Typed atomic wrapper; dispatches by sizeof(_Ty) to _Subatomic_impl,
     // so only 4-byte (and, on _WIN64, 8-byte) types compile.
     // NOTE(review): the extraction dropped the one-line bodies of several
     // members (original lines 489, 493, 502, 506, 510, 514, 518) --
     // presumably delegations to _Subatomic_impl<sizeof(_Ty)> like
     // _CompareAndSwap below; confirm against the shipped header.
482  template <typename _Ty>
483  class _Subatomic {
484  private:
485  volatile _Ty _M_value;
486 
487  public:
     // Implicit read with acquire semantics (body dropped by extraction).
488  operator _Ty() const volatile {
490  }
491 
     // Store with release semantics; returns the stored value.
492  _Ty operator=(_Ty _Rhs) {
494  return _Rhs;
495  }
496 
497  _Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand) {
498  return _Subatomic_impl<sizeof(_Ty)>::_CompareAndSwap(_M_value, _NewValue, _Comperand);
499  }
500 
501  _Ty _FetchAndAdd(_Ty _Addend) {
503  }
504 
505  _Ty operator++() {
507  }
508 
509  _Ty operator++(int) {
511  }
512 
513  _Ty operator--() {
515  }
516 
517  _Ty operator--(int) {
519  }
520 
     // Returns the new value: fetch-and-add yields the old value, so the
     // addend is added back before returning.
521  _Ty operator+=(_Ty _Addend) {
522  return _FetchAndAdd(_Addend) + _Addend;
523  }
524  };
525 
526  //
527  // An RAII class that spin-waits on a "rented" flag.
528  //
     // RAII spin lock over a caller-provided ("rented") long flag, per the
     // comment above: the constructor spin-acquires the flag.
     // NOTE(review): the extraction dropped original line 536 -- presumably
     // the _CONCRTIMP destructor declaration that releases the flag; confirm.
529  class _SpinLock
530  {
531  private:
     // Reference to the caller's flag word; the lock does not own storage.
532  volatile long& _M_flag;
533 
534  public:
535  _CONCRTIMP _SpinLock(volatile long& _Flag);
537 
538  private:
     // Declared-not-defined to forbid copying (pre-C++11 "= delete" idiom).
539  _SpinLock(const _SpinLock&);
540  void operator=(const _SpinLock&);
541  };
542 
543  //
544  // A class that holds the count used for spinning and is dependent
545  // on the number of hardware threads
546  //
547  struct _SpinCount
548  {
549  // Initializes the spinCount to either 0 or SPIN_COUNT, depending on
550  // the number of hardware threads.
551  static void __cdecl _Initialize();
552 
553  // Returns the current value of s_spinCount
554  _CONCRTIMP static unsigned int __cdecl _Value();
555 
556  // The number of iterations used for spinning
     // (shared mutable state, set by _Initialize and read via _Value).
557  static unsigned int _S_spinCount;
558  };
559 
563 
     // Default yield used by _SpinWait below (its constructor's default
     // argument); relinquishes the processor to other work.
564  void _CONCRTIMP __cdecl _UnderlyingYield();
565 
570 
     // Presumably returns the machine's hardware concurrency -- TODO confirm
     // against the shipped header/docs.
571  unsigned int _CONCRTIMP __cdecl _GetConcurrency();
572 
576 
     // Spin-wait state machine: _SpinOnce() returns true while the caller
     // should keep spinning/yielding and false once the caller ought to
     // block (or is on a single processor). _YieldCount bounds the number
     // of yields performed after the spin phase.
     // NOTE(review): the extraction dropped original lines 578 (class
     // header, presumably "class _SpinWait"), 684-689 (the state enum's
     // header and the values referenced below: _StateInitial, _StateSpin,
     // _StateYield, _StateBlock), 748 (the "bool _ShouldSpinAgain()"
     // signature) and 755 (the _M_state member). Confirm against the
     // shipped concrt.h.
577  template<unsigned int _YieldCount = 1>
579  {
580  public:
581 
582  typedef void (__cdecl *_YieldFunction)();
583 
587 
     // Field initialization beyond the yield function is deferred: the
     // _StateInitial case of _SpinOnce() calls _Reset() first.
588  _SpinWait(_YieldFunction _YieldMethod = _UnderlyingYield)
589  : _M_yieldFunction(_YieldMethod), _M_state(_StateInitial)
590  {
591  // Defer initialization of other fields to _SpinOnce().
592  }
593 
597 
     // Overrides the spin count; only legal before the first _SpinOnce()
     // (asserted below). A count of 0 selects single-processor behavior.
598  void _SetSpinCount(unsigned int _Count)
599  {
600  _CONCRT_ASSERT(_M_state == _StateInitial);
601  if (_Count == 0)
602  {
603  // Specify a count of 0 if we are on a single proc.
604  _M_state = _StateSingle;
605  }
606  else
607  {
608  _M_currentSpin = _Count;
609  _M_currentYield = _YieldCount;
610  _M_state = _StateSpin;
611  }
612  }
613 
625 
     // One step of the state machine: spin -> yield -> block.
     // Returns true while the caller should call again; false when the
     // caller should block (_StateBlock resets for potential reuse).
626  bool _SpinOnce()
627  {
628  switch (_M_state)
629  {
630  case _StateSpin:
631  {
632  unsigned long _Count = _NumberOfSpins();
633 
634  for (unsigned long _I = 0; _I < _Count; _I++)
635  {
636  _YieldProcessor();
637  }
638 
639  if (!_ShouldSpinAgain())
640  {
641  _M_state = (_M_currentYield == 0) ? _StateBlock : _StateYield;
642  }
643 
644  return true;
645  }
646 
647  case _StateYield:
648  _CONCRT_ASSERT(_M_currentYield > 0);
649  if (--_M_currentYield == 0)
650  {
651  _M_state = _StateBlock;
652  }
653 
654  // Execute the yield
655  _DoYield();
656  return true;
657 
658  case _StateBlock:
659  // Reset to defaults if client does not block
660  _Reset();
661  return false;
662 
663  case _StateSingle:
664  // No need to spin on a single processor: just execute the yield
665  _DoYield();
666  return false;
667 
668  case _StateInitial:
669  // Reset counters to their default value and Spin once.
670  _Reset();
671  return _SpinOnce();
672  default:
673  // Unreached
674  return false;
675  };
676  }
677 
678  protected:
679 
683 
     // State enum (values dropped by the extraction; _StateSingle is the
     // only one that survived -- see NOTE at the top of the class).
685  {
690  _StateSingle
691  };
692 
696 
     // Yields via the configured function, or just pauses the processor
     // when the template was instantiated with _YieldCount == 0.
697  void _DoYield()
698  {
699 #pragma warning ( push )
700 #pragma warning ( disable : 6326 ) // potential comparison of a constant with another constant
701  bool _ShouldYield = (_YieldCount != 0);
702 #pragma warning ( pop )
703  if (_ShouldYield)
704  {
705  _CONCRT_ASSERT(_M_yieldFunction != NULL);
706  _M_yieldFunction();
707  }
708  else
709  {
710  _YieldProcessor();
711  }
712  }
713 
717 
     // Returns to the default spin count (from _SpinCount::_Value()),
     // discarding any client-specified count.
718  void _Reset()
719  {
720  _M_state = _StateInitial;
721 
722  // Reset to the default spin value. The value specified
723  // by the client is ignored on a reset.
724  _SetSpinCount(_SpinCount::_Value());
725 
726  _CONCRT_ASSERT(_M_state != _StateInitial);
727  }
728 
735 
     // Spins-per-_SpinOnce step; each step then decrements the budget once
     // in _ShouldSpinAgain below.
736  unsigned long _NumberOfSpins()
737  {
738  return 1;
739  }
740 
747 
     // (Signature dropped by extraction: "bool _ShouldSpinAgain()".)
749  {
750  return (--_M_currentSpin > 0);
751  }
752 
753  unsigned long _M_currentSpin;
754  unsigned long _M_currentYield;
756  _YieldFunction _M_yieldFunction;
757  };
758 
761 
762  //
763  // This reentrant lock uses CRITICAL_SECTION and is intended for use when kernel blocking
764  // is desirable and where it is either known that the lock will be taken recursively in
765  // the same thread, or not known that a non-reentrant lock can be used safely.
766  //
     // NOTE(review): the extraction dropped original lines 767 (class
     // header -- the _Scoped_lock below names it _ReentrantBlockingLock),
     // 771/774 (ctor/dtor declarations under their surviving comments) and
     // 788/798 (the nested _Scoped_lock class header and destructor
     // signature). Confirm against the shipped concrt.h.
768  {
769  public:
770  // Constructor for _ReentrantBlockingLock
772 
773  // Destructor for _ReentrantBlockingLock
775 
776  // Acquire the lock, spin if necessary
777  _CONCRTIMP void _Acquire();
778 
779  // Tries to acquire the lock, does not spin.
780  // Returns true if the acquisition worked, false otherwise
781  _CONCRTIMP bool _TryAcquire();
782 
783  // Releases the lock
784  _CONCRTIMP void _Release();
785 
786 
787  // An exception safe RAII wrapper.
789  {
790  public:
791  // Constructs a holder and acquires the specified lock
792  explicit _Scoped_lock(_ReentrantBlockingLock& _Lock) : _M_lock(_Lock)
793  {
794  _M_lock._Acquire();
795  }
796 
797  // Destroys the holder and releases the lock
799  {
800  _M_lock._Release();
801  }
802  private:
803  _ReentrantBlockingLock& _M_lock;
804 
805  _Scoped_lock(const _Scoped_lock&); // no copy constructor
806  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
807  };
808 
809  private:
810  // Critical section requires windows.h. Hide the implementation so that
811  // user code need not include windows.
     // Opaque buffer sized to hold a CRITICAL_SECTION without the header.
812  _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
813  };
814 
815  //
816  // This reentrant lock is a pure spin lock and is intended for use when kernel blocking
817  // is NOT desirable and where it is either known that the lock will be taken recursively in
818  // the same thread, or not known that a non-reentrant lock can be used safely.
819  //
     // NOTE(review): the extraction dropped original lines 820 (class
     // header -- the _Scoped_lock below names it _ReentrantLock), 824 (the
     // constructor under its surviving comment), 837/847 (nested
     // _Scoped_lock header and destructor signature) and 859 (a data member
     // before _M_owner, presumably the recursion count). Confirm against
     // the shipped concrt.h.
821  {
822  public:
823  // Constructor for _ReentrantLock
825 
826  // Acquire the lock, spin if necessary
827  _CONCRTIMP void _Acquire();
828 
829  // Tries to acquire the lock, does not spin
830  // Returns true if the acquisition worked, false otherwise
831  _CONCRTIMP bool _TryAcquire();
832 
833  // Releases the lock
834  _CONCRTIMP void _Release();
835 
836  // An exception safe RAII wrapper.
838  {
839  public:
840  // Constructs a holder and acquires the specified lock
841  explicit _Scoped_lock(_ReentrantLock& _Lock) : _M_lock(_Lock)
842  {
843  _M_lock._Acquire();
844  }
845 
846  // Destroys the holder and releases the lock
848  {
849  _M_lock._Release();
850  }
851  private:
852  _ReentrantLock& _M_lock;
853 
854  _Scoped_lock(const _Scoped_lock&); // no copy constructor
855  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
856  };
857 
858  private:
860  volatile long _M_owner;
861  };
862 
863  //
864  // This non-reentrant lock uses CRITICAL_SECTION and is intended for use in situations
865  // where it is known that the lock will not be taken recursively, and can be more
866  // efficiently implemented.
867  //
     // NOTE(review): the extraction dropped original lines 868 (class
     // header -- the _Scoped_lock below names it _NonReentrantBlockingLock),
     // 875/878 (ctor/dtor declarations) and 891/901 (nested _Scoped_lock
     // header and destructor signature). Confirm against the shipped header.
869  {
870  public:
871  // Constructor for _NonReentrantBlockingLock
872  //
873  // The constructor is exported because _NonReentrantLock is
874  // included in DevUnitTests.
876 
877  // Destructor for _NonReentrantBlockingLock
879 
880  // Acquire the lock, spin if necessary
881  _CONCRTIMP void _Acquire();
882 
883  // Tries to acquire the lock, does not spin
884  // Returns true if the lock is taken, false otherwise
885  _CONCRTIMP bool _TryAcquire();
886 
887  // Releases the lock
888  _CONCRTIMP void _Release();
889 
890  // An exception safe RAII wrapper.
892  {
893  public:
894  // Constructs a holder and acquires the specified lock
895  explicit _Scoped_lock(_NonReentrantBlockingLock& _Lock) : _M_lock(_Lock)
896  {
897  _M_lock._Acquire();
898  }
899 
900  // Destroys the holder and releases the lock
902  {
903  _M_lock._Release();
904  }
905  private:
906  _NonReentrantBlockingLock& _M_lock;
907 
908  _Scoped_lock(const _Scoped_lock&); // no copy constructor
909  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
910  };
911 
912  private:
913  // Critical section requires windows.h. Hide the implementation so that
914  // user code need not include windows.h
915  _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
916  };
917 
918  //
919  // A Reader-Writer Lock is intended for use in situations with many readers and rare
920  // writers.
921  //
922  // A writer request immediately blocks future readers and then waits until all current
923  // readers drain. A reader request does not block future writers and must wait until
924  // all writers are done, even those that cut in front of it. In any race between a
925  // reader and a writer, the writer always wins.
926  //
     // Writer-priority reader/writer spin lock (see contract comment
     // above: writers block future readers and win all races).
     // NOTE(review): the extraction dropped original lines 927 (class
     // header -- the wrappers below name it _ReaderWriterLock), 934
     // (ctor/dtor declarations), 952 (the _TryAcquireWrite declaration
     // under its surviving comment), 964/970/974/976 and 988/994/998/1000
     // (the nested wrapper class headers, their acquire/release statements
     // and destructor signatures). Confirm against the shipped concrt.h.
928  {
929  public:
930  // Constructor for _ReaderWriterLock
931  //
932  // The constructor and destructor are exported because _ReaderWriterLock is
933  // included in DevUnitTests.
935 
936  // Acquire lock for reading. Spins until all writers finish, new writers
937  // can cut in front of a waiting reader.
938  _CONCRTIMP void _AcquireRead();
939 
940  // Release lock for reading. The last reader changes m_state to State.kFree
941  _CONCRTIMP void _ReleaseRead();
942 
943  // Acquire lock for writing. Spin until no readers exist, then acquire lock
944  // and prevent new readers.
945  _CONCRTIMP void _AcquireWrite();
946 
947  // Release lock for writing.
948  _CONCRTIMP void _ReleaseWrite();
949 
950  // Try to acquire the write lock, do not spin if unable to acquire.
951  // Returns true if the acquisition worked, false otherwise
953 
954  // Returns true if it is in write state, false otherwise
955  bool _HasWriteLock() const
956  {
957  return (_M_state == _Write);
958  }
959 
960  // Guarantees that all writers are out of the lock. This does nothing if there are no pending writers.
961  void _FlushWriteOwners();
962 
963  // An exception safe RAII wrapper.
965  {
966  public:
967  // Constructs a holder and acquires the writer lock
968  explicit _Scoped_lock(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
969  {
971  }
972 
973  // Destroys the holder and releases the writer lock
975  {
977  }
978 
979  private:
980 
981  _ReaderWriterLock& _M_lock;
982 
983  _Scoped_lock(const _Scoped_lock&); // no copy constructor
984  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
985  };
986 
987  // An exception safe RAII wrapper for reads.
989  {
990  public:
991  // Constructs a holder and acquires the reader lock
992  explicit _Scoped_lock_read(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
993  {
995  }
996 
997  // Destroys the holder and releases the reader lock
999  {
1001  }
1002 
1003  private:
1004 
1005  _ReaderWriterLock& _M_lock;
1006 
1007  _Scoped_lock_read(const _Scoped_lock_read&); // no copy constructor
1008  _Scoped_lock_read const & operator=(const _Scoped_lock_read&); // no assignment operator
1009  };
1010 
1011  private:
1012  // State enum where:
1013  // -1 --> write mode
1014  // 0 --> free
1015  // n > 0 --> n readers have locked in read mode.
1016  enum _State
1017  {
1018  _Write = -1,
1019  _Free = 0,
1020  _Read = 1
1021  };
1022 
1023  // The current state of the lock, mapping to the State enum. This is also
1024  // an indicator of the number of readers holding the lock, for any number > 0.
1025  volatile long _M_state;
1026 
1027  // A writer increments this as soon as it wants to lock and decrements this
1028  // after releasing the lock. To prevent writers from starving, a reader will
1029  // wait until this counter is zero, and only then will try to obtain the lock.
1030  volatile long _M_numberOfWriters;
1031 
1032  // Spin-Wait-Until variant
1033  static void __cdecl _WaitEquals(volatile const long& _Location, long _Value, long _Mask = 0xFFFFFFFF);
1034  };
1035 
1036  //
1037  // Exception safe RAII wrappers for _malloca()
1038  //
1039 
1040  //
1041  // _MallocaArrayHolder is used when the allocation size is known up front, and the memory must be allocated in a contiguous space
1042  //
     // RAII holder for a contiguous _malloca-allocated array: destroys the
     // first _M_ElemsConstructed elements, then frees the buffer.
     // NOTE(review): the extraction dropped original lines 1044 (class
     // header, presumably "class _MallocaArrayHolder"), 1048 (default
     // ctor), 1054 (an assert), 1072/1075 (the increment method's signature
     // and its "_M_ElemsConstructed++"), 1078 (destructor signature), 1085
     // (presumably "_freea(_M_ElemArray)" per the comment above it), 1089
     // (the _M_ElemsConstructed member) and 1092-1093 (copy/assign
     // declarations). Confirm against the shipped concrt.h.
1043  template<typename _ElemType>
1045  {
1046  public:
1047 
1049 
1050  // _Initialize takes the pointer to the memory allocated by the user using _malloca
1051  void _Initialize(_ElemType * _Elem)
1052  {
1053  // The object must be initialized exactly once
1055  _M_ElemArray = _Elem;
1056  _M_ElemsConstructed = 0;
1057  }
1058 
1059  // _InitOnRawMalloca take the raw pointer returned by _malloca directly
1060  // It will initialize itself with that pointer and return a strong typed pointer.
1061  // To be noted that the constructor will NOT be called.
1062  _ElemType * _InitOnRawMalloca(void * _MallocaRet)
1063  {
1064  if (_MallocaRet == nullptr)
1065  throw std::bad_alloc();
1066  _Initialize(static_cast<_ElemType *>(_MallocaRet));
1067  return static_cast<_ElemType *>(_MallocaRet);
1068  }
1069 
1070  // Register the next slot for destruction. Because we only keep the index of the last slot to be destructed,
1071  // this method must be called sequentially from 0 to N where N < _ElemCount.
1073  {
1074  _CONCRT_ASSERT(_M_ElemArray != NULL); // must already be initialized
1076  }
1077 
     // Destructor: runs ~_ElemType() only on the slots registered above.
1079  {
1080  for( size_t _I=0; _I < _M_ElemsConstructed; ++_I )
1081  {
1082  _M_ElemArray[_I].~_ElemType();
1083  }
1084  // Works even when object was not initialized, that is, _M_ElemArray == NULL
1086  }
1087  private:
1088  _ElemType * _M_ElemArray;
1090 
1091  // Copy construction and assignment are not supported.
1094  };
1095 
1096  //
1097  // _MallocaListHolder is used when the allocation size is not known up front, and the elements are added to the list dynamically
1098  //
     // RAII holder for a singly linked list of individually _malloca'd
     // nodes, used when the element count is not known up front (see the
     // comment above); the destructor destroys and _freea's each node.
     // NOTE(review): the extraction dropped original lines 1100 (class
     // header, presumably "class _MallocaListHolder"), 1109 (constructor
     // signature), 1133 (destructor signature), 1146 (the nested
     // _ElemNodeType class header), 1150 (its _M_Next member), 1156 (its
     // assignment operator) and 1159 (the _M_FirstNode member). Confirm
     // against the shipped concrt.h.
1099  template<typename _ElemType>
1101  {
1102  public:
1103  // Returns the size required to allocate the payload itself and the pointer to the next element
1104  size_t _GetAllocationSize() const
1105  {
1106  return sizeof(_ElemNodeType);
1107  }
1108 
1110  {
1111  }
1112 
1113  // Add the next element to the list. The memory is allocated in the caller's frame by _malloca
     // Pushes at the head; the node's link field aliases the element storage.
1114  void _AddNode(_ElemType * _Elem)
1115  {
1116  _ElemNodeType * _Node = reinterpret_cast<_ElemNodeType *>(_Elem);
1117  _Node->_M_Next = _M_FirstNode;
1118  _M_FirstNode = reinterpret_cast<_ElemNodeType *>(_Elem);
1119  }
1120 
1121  // _AddRawMallocaNode take the raw pointer returned by _malloca directly
1122  // It will add that bucket of memory to the list and return a strong typed pointer.
1123  // To be noted that the constructor will NOT be called.
1124  _ElemType * _AddRawMallocaNode(void * _MallocaRet)
1125  {
1126  if (_MallocaRet == nullptr)
1127  throw std::bad_alloc();
1128  _AddNode(static_cast<_ElemType *>(_MallocaRet));
1129  return static_cast<_ElemType *>(_MallocaRet);
1130  }
1131 
1132  // Walk the list and destruct, then free each element
1134  {
1135  for( _ElemNodeType * _Node = _M_FirstNode; _Node != NULL; )
1136  {
     // Save the link before freeing the node that holds it.
1137  auto _M_Next = _Node->_M_Next;
1138  _Node->_M_Elem._ElemType::~_ElemType();
1139  _freea(_Node);
1140  _Node = _M_Next;
1141  }
1142  }
1143 
1144  private:
1145 
     // Node layout: element first, so an _ElemType* reinterprets as a node.
1147  {
1148  friend class _MallocaListHolder;
1149  _ElemType _M_Elem;
1151  // Always instantiated using malloc, so default constructor and destructor are not needed.
1152  _ElemNodeType();
1153  ~_ElemNodeType();
1154  // Copy construction and assignment are not supported.
1155  _ElemNodeType(const _ElemNodeType & );
1157  };
1158 
1160 
1161  // Copy construction and assignment are not supported.
1164  };
1165 
1166  // Forward declarations
1168  class _TaskCollection;
1169  class _UnrealizedChore;
1170 } // namespace details
1171 
1172 //**************************************************************************
1173 // Public Namespace:
1174 //
1175 // Anything in the Concurrency namespace is intended for direct client consumption.
1176 //
1177 //**************************************************************************
1178 
1187 
// Thrown when a runtime operation fails to acquire a resource; carries the
// failing HRESULT, retrievable via get_error_code().
1188 class scheduler_resource_allocation_error : public std::exception
1189 {
1190 public:
1200 
1201  _CONCRTIMP scheduler_resource_allocation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw();
1202 
1209 
1210  explicit _CONCRTIMP scheduler_resource_allocation_error(HRESULT _Hresult) throw();
1211 
1218 
     // Returns the HRESULT supplied at construction.
1219  _CONCRTIMP HRESULT get_error_code() const throw();
1220 
1221 private:
1222  HRESULT _Hresult;
1223 };
1224 
1234 
// NOTE(review): extraction dropped original line 1235 (the class header);
// the constructors below name it scheduler_worker_creation_error -- also an
// HRESULT-carrying runtime exception, per ConcRT docs derived from
// scheduler_resource_allocation_error; confirm against the shipped header.
1236 {
1237 public:
1247 
1248  _CONCRTIMP scheduler_worker_creation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw();
1249 
1256 
1257  explicit _CONCRTIMP scheduler_worker_creation_error(HRESULT _Hresult) throw();
1258 };
1259 
1263 
// Thrown when the runtime is used on an operating system it does not support.
1264 class unsupported_os : public std::exception
1265 {
1266 public:
1273 
1274  explicit _CONCRTIMP unsupported_os(_In_z_ const char * _Message) throw();
1275 
1279 
1280  _CONCRTIMP unsupported_os() throw();
1281 };
1282 
1289 
// Thrown (per ConcRT docs) when an operation requires a scheduler to be
// attached to the current context and none is.
1290 class scheduler_not_attached : public std::exception
1291 {
1292 public:
1299 
1300  explicit _CONCRTIMP scheduler_not_attached(_In_z_ const char * _Message) throw();
1301 
1305 
     // NOTE(review): extraction dropped original line 1306 -- presumably the
     // exported default constructor, matching the siblings above.
1307 };
1308 
1315 
// Thrown (per ConcRT docs) on an invalid Scheduler::Attach call.
1316 class improper_scheduler_attach : public std::exception
1317 {
1318 public:
1325 
1326  explicit _CONCRTIMP improper_scheduler_attach(_In_z_ const char * _Message) throw();
1327 
1331 
1333 };
1334 
1342 
// Thrown (per ConcRT docs) on an unbalanced CurrentScheduler::Detach call.
1343 class improper_scheduler_detach : public std::exception
1344 {
1345 public:
1346 
1353 
1354  explicit _CONCRTIMP improper_scheduler_detach(_In_z_ const char * _Message) throw();
1355 
1359 
1361 };
1362 
1369 
// Thrown (per ConcRT docs) when referencing a scheduler that is shutting down.
1370 class improper_scheduler_reference : public std::exception
1371 {
1372 public:
1373 
1380 
1381  explicit _CONCRTIMP improper_scheduler_reference(_In_z_ const char* _Message) throw();
1382 
1386 
1388 };
1389 
1395 
// Thrown (per ConcRT docs) when setting the default scheduler policy after a
// default scheduler already exists.
1396 class default_scheduler_exists : public std::exception
1397 {
1398 public:
1405 
1406  explicit _CONCRTIMP default_scheduler_exists(_In_z_ const char * _Message) throw();
1407 
1411 
1413 };
1414 
1428 
1429 class context_unblock_unbalanced : public std::exception
1430 {
1431 public:
1438 
1439  explicit _CONCRTIMP context_unblock_unbalanced(_In_z_ const char * _Message) throw();
1440 
1444 
1446 };
1447 
1454 
1455 class context_self_unblock : public std::exception
1456 {
1457 public:
1464 
1465  explicit _CONCRTIMP context_self_unblock(_In_z_ const char * _Message) throw();
1466 
1470 
1471  _CONCRTIMP context_self_unblock() throw();
1472 };
1473 
1490 
1491 class missing_wait : public std::exception
1492 {
1493 public:
1500 
1501  explicit _CONCRTIMP missing_wait(_In_z_ const char * _Message) throw();
1502 
1506 
1507  _CONCRTIMP missing_wait() throw();
1508 };
1509 
1519 
1520 class bad_target : public std::exception
1521 {
1522 public:
1529 
1530  explicit _CONCRTIMP bad_target(_In_z_ const char * _Message) throw();
1531 
1535 
1536  _CONCRTIMP bad_target() throw();
1537 };
1538 
1543 
// ConcRT exception type, used by the Agents/messaging layer. Per the
// documentation, thrown when a message block cannot find a requested
// message. Derives from std::exception.
1544 class message_not_found : public std::exception
1545 {
1546 public:
1553 
1554  explicit _CONCRTIMP message_not_found(_In_z_ const char * _Message) throw();
1555 
1559 
1560  _CONCRTIMP message_not_found() throw();
1561 };
1562 
1569 
// ConcRT exception type, used by the Agents/messaging layer. Per the
// documentation, thrown when linking a message block to an invalid target
// (e.g. one it is already linked to). Derives from std::exception.
1570 class invalid_link_target : public std::exception
1571 {
1572 public:
1579 
1580  explicit _CONCRTIMP invalid_link_target(_In_z_ const char * _Message) throw();
1581 
1585 
1586  _CONCRTIMP invalid_link_target() throw();
1587 };
1588 
1598 
// ConcRT exception type. Per the documentation, thrown when an invalid or
// unknown key is passed to a SchedulerPolicy method. Derives from
// std::exception.
1599 class invalid_scheduler_policy_key : public std::exception
1600 {
1601 public:
1608 
1609  explicit _CONCRTIMP invalid_scheduler_policy_key(_In_z_ const char * _Message) throw();
1610 
1614 
1616 };
1617 
1626 
// ConcRT exception type. Per the documentation, thrown when a policy key
// of a SchedulerPolicy is set to a value unsupported for that key.
// Derives from std::exception.
1627 class invalid_scheduler_policy_value : public std::exception
1628 {
1629 public:
1636 
1637  explicit _CONCRTIMP invalid_scheduler_policy_value(_In_z_ const char * _Message) throw();
1638 
1642 
1644 };
1645 
1654 
// NOTE(review): the class-declaration line appears to have been lost in
// extraction; from the constructor below this is
// `class invalid_scheduler_policy_thread_specification : public std::exception`
// -- confirm against the original header. Per the documentation, thrown
// when a SchedulerPolicy's thread-count constraints are contradictory
// (e.g. MinConcurrency greater than MaxConcurrency).
1656 {
1657 public:
1664 
1665  explicit _CONCRTIMP invalid_scheduler_policy_thread_specification(_In_z_ const char * _Message) throw();
1666 
1670 
1672 };
1673 
1688 
// ConcRT exception type. Per the documentation, thrown when a context
// exits with a nested scheduler still attached (Detach was never called).
// Derives from std::exception.
1689 class nested_scheduler_missing_detach : public std::exception
1690 {
1691 public:
1698 
1699  explicit _CONCRTIMP nested_scheduler_missing_detach(_In_z_ const char * _Message) throw();
1700 
1704 
1706 };
1707 
1711 
// ConcRT exception type. Per the documentation, thrown when an operation
// has timed out. Derives from std::exception.
1712 class operation_timed_out : public std::exception
1713 {
1714 public:
1721 
1722  explicit _CONCRTIMP operation_timed_out(_In_z_ const char * _Message) throw();
1723 
1727 
1728  _CONCRTIMP operation_timed_out() throw();
1729 };
1730 
1745 
// ConcRT exception type. Per the documentation, thrown when a chore is
// scheduled to a task collection multiple times without an intervening
// wait/cancel completing it. Derives from std::exception.
1746 class invalid_multiple_scheduling : public std::exception
1747 {
1748 public:
1755 
1756  explicit _CONCRTIMP invalid_multiple_scheduling(_In_z_ const char * _Message) throw();
1757 
1761 
1763 };
1764 
1771 
// ConcRT exception type. Per the documentation, thrown when
// Context::Oversubscribe(false) is called without a matching prior
// Oversubscribe(true). Derives from std::exception.
1772 class invalid_oversubscribe_operation : public std::exception
1773 {
1774 public:
1781 
1782  explicit _CONCRTIMP invalid_oversubscribe_operation(_In_z_ const char * _Message) throw();
1783 
1787 
1789 };
1790 
1800 
// ConcRT exception type. Per the documentation, thrown when a lock is
// acquired improperly (e.g. recursively acquiring a non-reentrant
// critical_section). Derives from std::exception.
1801 class improper_lock : public std::exception
1802 {
1803 public:
1804 
1811 
1812  explicit _CONCRTIMP improper_lock(_In_z_ const char * _Message) throw();
1813 
1817 
1818  _CONCRTIMP improper_lock() throw();
1819 };
1820 
1824 
// NOTE(review): the class-declaration line was lost in extraction; from
// the factory functions below (`static location current()`, etc.) this is
// `class location` -- an abstraction of a physical placement on hardware
// (system / NUMA node / scheduling node / execution resource), used to
// place work. The visible state is a 28-bit type tag, 4 reserved bits, a
// scheduler binding id, and a union of numeric id vs. pointer binding.
1826 {
1827 public:
1828 
1835 
// Default-construction member-init list (the `location() :` line itself
// was dropped by extraction): yields the "system" location with no binding.
1837  _M_type(_System),
1838  _M_reserved(0),
1839  _M_bindingId(0),
1840  _M_pBinding(NULL),
1841  _M_ptr(NULL)
1842  {
1843  }
1844 
1848 
// Copy constructor body (signature line dropped); delegates to _Assign.
1850  {
1851  _Assign(_Src);
1852  }
1853 
1854 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
1855 
1865 
// Desktop-only factory: a location representing the given NUMA node.
1866  _CONCRTIMP static location __cdecl from_numa_node(unsigned short _NumaNodeNumber);
1867 
1868 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
1869 
1876 
// Factory: the most specific location of the calling thread.
1877  _CONCRTIMP static location __cdecl current();
1878 
1885 
// Copy-assignment body (signature line dropped); delegates to _Assign.
1887  {
1888  _Assign(_Rhs);
1889  return *this;
1890  }
1891 
1895 
// Destructor body (signature line dropped); nothing to release.
1897  {
1898  }
1899 
1906 
// Equality compares only the type tag and the id/pointer union; the
// binding id and binding pointer do not participate.
1907  bool operator==(const location& _Rhs) const
1908  {
1909  return (_M_type == _Rhs._M_type && _M_ptr == _Rhs._M_ptr);
1910  }
1911 
1918 
1919  bool operator!=(const location& _Rhs) const
1920  {
1921  return !operator==(_Rhs);
1922  }
1923 
1924  //**************************************************
1925  //
1926  // Runtime internal public pieces of location. No code outside the core of ConcRT can depend on anything
1927  // below. It is internal implementation detail:
1928  //
1929 
1933 
// Internal factory: location of the scheduling node the caller runs on.
1934  _CONCRTIMP static location __cdecl _Current_node();
1935 
1939 
// Discriminator for the id/pointer union below.
1940  enum _Type
1941  {
1945  _System, // _M_id is meaningless
1946 
1950  _NumaNode, // _M_id is the Windows NUMA node number
1951 
1955  _SchedulingNode, // _M_id is the unique identifier for the scheduling node
1956 
1960  _ExecutionResource, // _M_id is the unique identifier for the execution resource
1961  };
1962 
1966 
// Internal constructor from explicit type/id plus optional tight binding.
1967  location(_Type _LocationType, unsigned int _Id, unsigned int _BindingId = 0, _Inout_opt_ void *_PBinding = NULL);
1968 
1979 
// Internal intersection tests (virtual-processor and node granularity);
// declared here, defined in the runtime.
1980  bool _FastVPIntersects(const location& _Rhs) const;
1981 
1992 
1993  bool _FastNodeIntersects(const location& _Rhs) const;
1994 
1998 
// Copies every field from _Rhs (union copied via its pointer member).
1999  void _Assign(const location& _Rhs)
2000  {
2001  _M_type = _Rhs._M_type;
2002  _M_reserved = _Rhs._M_reserved;
2003 
2004  _M_ptr = _Rhs._M_ptr;
2005 
2006  _M_bindingId = _Rhs._M_bindingId;
2007  _M_pBinding = _Rhs._M_pBinding;
2008  }
2009 
2013 
2014  bool _Is_system() const
2015  {
2016  return (_Type)_M_type == _System;
2017  }
2018 
2022 
// Reinterprets the tight binding pointer as the requested type; caller
// must know the binding's real type.
2023  template<typename _Ty>
2024  _Ty* _As() const
2025  {
2026  return reinterpret_cast<_Ty *>(_M_pBinding);
2027  }
2028 
2032 
2033  unsigned int _GetId() const
2034  {
2035  return _M_id;
2036  }
2037 
2041 
2042  _Type _GetType() const
2043  {
2044  return (_Type)_M_type;
2045  }
2046 
2050 
2051  unsigned int _GetBindingId() const
2052  {
2053  return _M_bindingId;
2054  }
2055 
2056 private:
2057 
2058  // Indicates the type of location (as _Type)
2059  unsigned int _M_type : 28;
2060 
2061  // Flags on the location. Reserved for future use.
2062  unsigned int _M_reserved : 4;
2063 
2064  // If the location has a tight binding, this is the unique identifier of the scheduler to which the binding has specific meaning.
2065  unsigned int _M_bindingId;
2066 
2067  // Defines the agnostic (abstract hardware) binding of the location.
2068  union
2069  {
2070  // The identifier for the binding (NUMA node number, scheduler node ID, execution resource ID)
2071  unsigned int _M_id;
2072 
2073  // Pointer binding.
2074  void *_M_ptr;
2075  };
2076 
2077  // The specific binding to a scheduler. (For example, a specific virtual processor for something like location::current() )
2078  // This will be NULL if there is no tight binding.
// NOTE(review): the `void * _M_pBinding;` declaration line itself appears
// to have been dropped by extraction (it is referenced throughout above).
2080 };
2081 
2082 #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
2083 
2092 
// Abstract interface (desktop-only section) representing a group of
// related work inside a scheduler. Lifetime is reference-counted via
// Reference/Release; operator delete is privatized so clients must use
// Release rather than deleting directly.
2093 class ScheduleGroup
2094 {
2095 public:
2096 
2111 
// Schedules a light-weight task (_Proc with _Data) into this group.
2112  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0;
2113 
2120 
// Unique identifier of this schedule group.
2121  virtual unsigned int Id() const =0;
2122 
2139 
// Increments the reference count; returns the new count.
2140  virtual unsigned int Reference() =0;
2141 
2160 
// Decrements the reference count; returns the new count.
2161  virtual unsigned int Release() =0;
2162 
2163 protected:
2164 
2165  //
2166  // Privatize operator delete. Clients should utilize Release to relinquish a schedule group.
2167  //
2168  template<class _Ty> friend void ::Concurrency::details::_InternalDeleteHelper(_Ty * _PObject);
2169 
2170  virtual ~ScheduleGroup() {};
2171 };
2172 
2178 
// Sentinel meaning "no limit" for concurrency policy values (all bits set).
2179 const unsigned int MaxExecutionResources = 0xFFFFFFFF;
2180 
2186 
// Special ContextPriority value: contexts inherit the creating thread's
// priority rather than using a fixed one.
2187 const unsigned int INHERIT_THREAD_PRIORITY = 0x0000F000;
2188 
2197 
// Keys describing the tunable aspects of a scheduler; used with
// SchedulerPolicy::Get/SetPolicyValue. MaxPolicyElementKey doubles as the
// element count (it sizes _PolicyBag::_M_pPolicyBag below).
2198 enum PolicyElementKey
2199 {
2206 
2207  SchedulerKind,
2208 
2218 
2219  MaxConcurrency,
2220 
2231 
2232  MinConcurrency,
2233 
2240 
2241  TargetOversubscriptionFactor,
2242 
2251 
2252  LocalContextCacheSize,
2253 
2259 
2260  ContextStackSize,
2261 
2269 
2270  ContextPriority,
2271 
2278 
2279  SchedulingProtocol,
2280 
2289 
2290  DynamicProgressFeedback,
2291 
2299 
2300  WinRTInitialization,
2301 
2305 
2306  MaxPolicyElementKey
2307 };
2308 
2315 
// Values for the SchedulerKind policy. UmsThreadDefault is a deprecated
// alias of ThreadScheduler (see the #pragma deprecated that follows).
2316 enum SchedulerType
2317 {
2321 
2322  ThreadScheduler,
2323 
2329 
2330  UmsThreadDefault = ThreadScheduler
2331 };
2332 
2333 #pragma deprecated(UmsThreadDefault)
2334 
2341 
// Values for the SchedulingProtocol policy key: favor schedule-group
// locality vs. round-robin forward progress.
2342 enum SchedulingProtocolType
2343 {
2349 
2350  EnhanceScheduleGroupLocality,
2351 
2356 
2357  EnhanceForwardProgress
2358 };
2359 
2367 
// Values for the DynamicProgressFeedback policy key: whether the
// scheduler rebalances resources based on progress information.
2368 enum DynamicProgressFeedbackType
2369 {
2376 
2377  ProgressFeedbackDisabled,
2378 
2385 
2386  ProgressFeedbackEnabled
2387 };
2388 
2395 
// Values for the WinRTInitialization policy key: whether scheduler
// threads initialize the Windows Runtime (as MTA) or not.
2396 enum WinRTInitializationType
2397 {
2402 
2403  InitializeWinRTAsMTA,
2404 
2409 
2410  DoNotInitializeWinRT
2411 };
2412 
2425 
// A keyed bag of scheduler-behavior settings (one slot per
// PolicyElementKey), stored behind a heap-allocated _PolicyBag. Copyable
// and assignable; validation helpers are private and implemented in the
// runtime DLL (_CONCRTIMP).
2426 class SchedulerPolicy
2427 {
2428 public:
2429 
2448 
// Default policy (documented defaults per key in the ConcRT docs).
2449  _CONCRTIMP SchedulerPolicy();
2450 
2472 
// Varargs constructor: _PolicyKeyCount pairs of (PolicyElementKey, value).
2473  _CONCRTIMP SchedulerPolicy(size_t _PolicyKeyCount, ...);
2474 
2496 
2497  _CONCRTIMP SchedulerPolicy(const SchedulerPolicy& _SrcPolicy);
2498 
2515 
2516  _CONCRTIMP SchedulerPolicy& operator=(const SchedulerPolicy& _RhsPolicy);
2517 
2521 
2522  _CONCRTIMP ~SchedulerPolicy();
2523 
2539 
// Returns the current value for _Key.
2540  _CONCRTIMP unsigned int GetPolicyValue(PolicyElementKey _Key) const;
2541 
2565 
// Sets _Key to _Value; returns the previous value.
2566  _CONCRTIMP unsigned int SetPolicyValue(PolicyElementKey _Key, unsigned int _Value);
2567 
2586 
// Sets Min/MaxConcurrency together so they can be validated as a pair.
2587  _CONCRTIMP void SetConcurrencyLimits(unsigned int _MinConcurrency, unsigned int _MaxConcurrency = MaxExecutionResources);
2588 
2597 
// Internal: validates the policy for use by a ConcRT scheduler.
2598  void _ValidateConcRTPolicy() const;
2599 
2600 private:
2601 
// Storage: an array indexed by PolicyElementKey, overlaid (via union)
// with named per-key fields for debugging convenience.
2602  struct _PolicyBag
2603  {
2604  union
2605  {
2606  unsigned int _M_pPolicyBag[MaxPolicyElementKey];
2607  struct
2608  {
2609  SchedulerType _M_schedulerKind;
2610  unsigned int _M_maxConcurrency;
2611  unsigned int _M_minConcurrency;
2612  unsigned int _M_targetOversubscriptionFactor;
2613  unsigned int _M_localContextCacheSize;
2614  unsigned int _M_contextStackSize;
2615  unsigned int _M_contextPriority;
2616  SchedulingProtocolType _M_schedulingProtocol;
2617  DynamicProgressFeedbackType _M_dynamicProgressFeedback;
2618  WinRTInitializationType _M_WinRTInitialization;
2619  } _M_specificValues;
2620  } _M_values;
2621  } *_M_pPolicyBag;
2622 
2626 
// Initializes from a va_list of (key, value) pairs.
2627  void _Initialize(size_t _PolicyKeyCount, va_list * _PArgs);
2628 
2632 
2633  void _Assign(const SchedulerPolicy& _SrcPolicy);
2634 
2638 
2639  static bool __cdecl _ValidPolicyKey(PolicyElementKey _Key);
2640 
2644 
2645  static bool __cdecl _ValidPolicyValue(PolicyElementKey _Key, unsigned int _Value);
2646 
2650 
2651  static bool __cdecl _AreConcurrencyLimitsValid(unsigned int _MinConcurrency, unsigned int _MaxConcurrency);
2652  bool _AreConcurrencyLimitsValid() const;
2653 
2657 
2658  bool _ArePolicyCombinationsValid() const;
2659 
2663 
2664  void _ResolvePolicyValues();
2665 
2669 
// Debug helper: human-readable name for a policy slot index.
2670  static char * __cdecl _StringFromPolicyKey(unsigned int _Index);
2671 };
2672 
2684 
// Static-only facade over the scheduler attached to the calling context
// (desktop-only section). Not instantiable -- the only constructor is
// private; every member is a static passthrough into the runtime.
2685 class CurrentScheduler
2686 {
2687 private:
2688  CurrentScheduler() {}
2689 
2690 public:
2700 
// Id of the current scheduler.
2701  _CONCRTIMP static unsigned int __cdecl Id();
2702 
2714 
// Copy of the current scheduler's policy.
2715  _CONCRTIMP static SchedulerPolicy __cdecl GetPolicy();
2716 
2728 
// Pointer to the scheduler attached to the calling context.
2729  _CONCRTIMP static Scheduler * __cdecl Get();
2730 
2743 
2744  _CONCRTIMP static unsigned int __cdecl GetNumberOfVirtualProcessors();
2745 
2768 
// Creates a new scheduler with _Policy and attaches it to this context.
2769  _CONCRTIMP static void __cdecl Create(const SchedulerPolicy& _Policy);
2770 
2787 
// Detaches the current scheduler from the calling context.
2788  _CONCRTIMP static void __cdecl Detach();
2789 
2803 
// Registers a Windows event to be signaled when the scheduler shuts down.
2804  _CONCRTIMP static void __cdecl RegisterShutdownEvent(HANDLE _ShutdownEvent);
2805 
2826 
2827  _CONCRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup();
2828 
2852 
// Overload: create a schedule group biased toward _Placement.
2853  _CONCRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup(location& _Placement);
2854 
2873 
2874  _CONCRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data);
2875 
2897 
// Overload: schedule a task biased toward _Placement.
2898  _CONCRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement);
2899 
2916 
// Whether _Placement is available to (usable by) the current scheduler.
2917  _CONCRTIMP static bool __cdecl IsAvailableLocation(const location& _Placement);
2918 };
2919 
2934 
// Abstract interface to a ConcRT scheduler instance (desktop-only
// section). Instances are created via the static Create factory and are
// reference-counted (Reference/Release); construction and destruction are
// protected so clients cannot instantiate or delete directly.
2935 class Scheduler
2936 {
2937 protected:
2951 
2952  Scheduler() {}
2953 
2957 
2958  virtual ~Scheduler() {}
2959 
2960 public:
2961 
2985 
// Factory: creates a scheduler governed by _Policy.
2986  _CONCRTIMP static Scheduler * __cdecl Create(const SchedulerPolicy& _Policy);
2987 
2994 
2995  virtual unsigned int Id() const =0;
2996 
3005 
3006  virtual unsigned int GetNumberOfVirtualProcessors() const =0;
3007 
3008 
3018 
3019  virtual SchedulerPolicy GetPolicy() const =0;
3020 
3035 
// Increments the reference count; returns the new count.
3036  virtual unsigned int Reference() =0 ;
3037 
3050 
// Decrements the reference count; returns the new count.
3051  virtual unsigned int Release() =0;
3052 
3061 
3062  virtual void RegisterShutdownEvent(HANDLE _Event) =0;
3063 
3079 
// Attaches this scheduler to the calling context.
3080  virtual void Attach() =0;
3081 
3099 
// Sets the process-wide default scheduler policy.
3100  _CONCRTIMP static void __cdecl SetDefaultSchedulerPolicy(const SchedulerPolicy& _Policy);
3101 
3113 
3114  _CONCRTIMP static void __cdecl ResetDefaultSchedulerPolicy();
3115 
3133 
3134  virtual ScheduleGroup * CreateScheduleGroup() =0;
3135 
3156 
// Overload: create a schedule group biased toward _Placement.
3157  virtual ScheduleGroup * CreateScheduleGroup(location& _Placement) =0;
3158 
3172 
3173  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0;
3174 
3191 
3192  virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement) =0;
3193 
3208 
3209  virtual bool IsAvailableLocation(const location& _Placement) const =0;
3210 };
3211 
3228 
// Abstract interface to an execution context (desktop-only section).
// Instance members describe one context; static members act on the
// calling context. Deletion is privatized -- contexts are managed by the
// scheduler.
3229 class Context
3230 {
3231 public:
3238 
3239  virtual unsigned int GetId() const =0;
3240 
3253 
3254  virtual unsigned int GetVirtualProcessorId() const =0;
3255 
3268 
3269  virtual unsigned int GetScheduleGroupId() const =0;
3270 
3278 
// Static forms of the above, applied to the calling context:
3279  _CONCRTIMP static unsigned int __cdecl Id();
3280 
3293 
3294  _CONCRTIMP static unsigned int __cdecl VirtualProcessorId();
3295 
3304 
3305  _CONCRTIMP static unsigned int __cdecl ScheduleGroupId();
3306 
3327 
// Cooperatively blocks the calling context until Unblock is called on it.
3328  _CONCRTIMP static void __cdecl Block();
3329 
3350 
3351  virtual void Unblock() =0;
3352 
3368 
3369  virtual bool IsSynchronouslyBlocked() const =0;
3370 
3380 
3381  _CONCRTIMP static void __cdecl _SpinYield();
3382 
3393 
// Parenthesized to survive a possible `Yield` macro from Windows headers.
3394  _CONCRTIMP static void (__cdecl Yield)();
3395 
3406 
// Macro-safe alias for Yield().
3407  static inline void __cdecl YieldExecution()
3408  {
3409  (Yield)();
3410  }
3411 
3422 
3423  _CONCRTIMP static bool __cdecl IsCurrentTaskCollectionCanceling();
3424 
3435 
3436  _CONCRTIMP static Context * __cdecl CurrentContext();
3437 
3447 
// Begin/end oversubscription (an extra virtual processor) on this context.
3448  _CONCRTIMP static void __cdecl Oversubscribe(bool _BeginOversubscription);
3449 
3450 protected:
3451 
3452  //
3453  // Privatize operator delete. The scheduler internally manages contexts.
3454  //
3455  template<class _Ty> friend void ::Concurrency::details::_InternalDeleteHelper(_Ty * _PObject);
3456 
3457  virtual ~Context() {};
3458 };
3459 
3460 #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */
3461 
3468 
3470 
3477 
// Infinite-wait sentinel for cooperative synchronization timeouts.
3478 const unsigned int COOPERATIVE_TIMEOUT_INFINITE = (unsigned int)-1;
3479 
3487 
// NOTE(review): the class-declaration line was lost in extraction; from
// the scoped_lock member and native_handle typedef below this is
// `class critical_section` -- ConcRT's non-reentrant, cooperative mutex.
// Waiters queue as nodes (_M_pHead/_M_pTail list); copy operations are
// suppressed (declarations at the bottom appear dropped by extraction).
3489 {
3490 public:
3491 
3495 
3497 
3505 
3507 
3519 
// Acquires the lock, cooperatively blocking if contended.
3520  _CONCRTIMP void lock();
3521 
3529 
// Non-blocking acquire; returns whether the lock was taken.
3530  _CONCRTIMP bool try_lock();
3531 
3542 
// Acquire with a millisecond timeout (COOPERATIVE_TIMEOUT_INFINITE waits
// forever -- presumably; confirm units against the ConcRT docs).
3543  _CONCRTIMP bool try_lock_for(unsigned int _Timeout);
3544 
3550 
3551  _CONCRTIMP void unlock();
3552 
3556 
// The native handle is the critical_section itself.
3557  typedef critical_section& native_handle_type;
3558 
3569 
3570  _CONCRTIMP native_handle_type native_handle();
3571 
3579 
// Internal: guarantee memory visibility of the current owner's writes.
3580  void _Flush_current_owner();
3581 
3594 
// Internal: acquire using an externally- or internally-supplied wait node.
3595  bool _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode);
3596 
3600 
// RAII holder: acquires in the ctor, releases in the dtor. Non-copyable.
3602  {
3603  public:
3604 
3613 
3614  explicit _CONCRTIMP scoped_lock(critical_section& _Critical_section);
3615 
3620 
3622 
3623  private:
3624 
3625  critical_section& _M_critical_section;
// Opaque storage for the waiter node (sized in _CONCRT_BUFFER units).
3626  _CONCRT_BUFFER _M_node[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3627 
3628  scoped_lock(const scoped_lock&); // no copy constructor
3629  scoped_lock const & operator=(const scoped_lock&); // no assignment operator
3630  };
3631 
3632 private:
3641 
// Internal: promote a queued waiter node to the active (owning) slot.
3642  void _Switch_to_active(void * _PLockingNode);
3643 
3644  _CONCRT_BUFFER _M_activeNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3645  void * volatile _M_pHead;
3646  void * volatile _M_pTail;
3647 
3651 
3653 
3657 
3659 };
3660 
3669 
// NOTE(review): the class-declaration line was lost in extraction; from
// the scoped_lock members below this is `class reader_writer_lock` --
// ConcRT's cooperative, writer-preference, non-reentrant reader/writer
// lock. Copying is suppressed (copy ctor declared private below).
3671 {
3672 public:
3673 
3677 
3679 
3687 
3689 
3703 
// Acquires the lock as a writer (exclusive).
3704  _CONCRTIMP void lock();
3705 
3713 
3714  _CONCRTIMP bool try_lock();
3715 
3727 
// Acquires the lock as a reader (shared with other readers).
3728  _CONCRTIMP void lock_read();
3729 
3737 
3738  _CONCRTIMP bool try_lock_read();
3739 
3751 
// Releases the lock, whichever mode it was taken in.
3752  _CONCRTIMP void unlock();
3753 
3767 
// Internal: writer acquisition through an explicit wait node.
3768  void _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode);
3769 
3773 
// RAII writer holder: exclusive acquire in ctor, release in dtor.
3775  {
3776  public:
3784 
3785  explicit _CONCRTIMP scoped_lock(reader_writer_lock& _Reader_writer_lock);
3786 
3790 
3792 
3793  private:
3794 
3795  reader_writer_lock& _M_reader_writer_lock;
3796  _CONCRT_BUFFER _M_writerNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3797 
3798  scoped_lock(const scoped_lock&); // no copy constructor
3799  scoped_lock const & operator=(const scoped_lock&); // no assignment operator
3800  };
3801 
3805 
// RAII reader holder: shared acquire in ctor, release in dtor.
3807  {
3808  public:
3817 
3818  explicit _CONCRTIMP scoped_lock_read(reader_writer_lock& _Reader_writer_lock);
3819 
3823 
3825 
3826  private:
3827 
3829 
3830  scoped_lock_read(const scoped_lock_read&); // no copy constructor
3831  scoped_lock_read const & operator=(const scoped_lock_read&); // no assignment operator
3832  };
3833 
3834 private:
3835 
3843 
// Internal: install _PWriter as the next writer; returns success.
3844  bool _Set_next_writer(void * _PWriter);
3845 
3855 
// Internal: collect the set of queued readers to admit together.
3856  void * _Get_reader_convoy();
3857 
3863 
3864  void _Unlock_writer();
3865 
3870 
3871  void _Unlock_reader();
3872 
3882 
3883  void _Remove_last_writer(void * _PWriter);
3884 
3893 
3894  void _Switch_to_active(void * _PWriter);
3895 
3896  _CONCRT_BUFFER _M_activeWriter[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
3900  volatile long _M_lockState;
3901 
3905 
3906  reader_writer_lock (const reader_writer_lock& _Lock);
3907 
3911 
3913 };
3914 
3921 
// Cooperative, manual-reset event. Waiters are chained through
// _M_pWaitChain; copy construction/assignment are declared private and
// left unimplemented to forbid copying.
3922 class event
3923 {
3924 public:
3925 
3929 
3930  _CONCRTIMP event();
3931 
3939 
3940  _CONCRTIMP ~event();
3941 
3956 
// Waits until set, or until _Timeout ms elapse; returns a status code
// (0 on signal, COOPERATIVE_WAIT_TIMEOUT on timeout per the ConcRT docs
// -- confirm).
3957  _CONCRTIMP size_t wait(unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
3958 
3967 
// Signals the event, releasing waiters; stays signaled until reset().
3968  _CONCRTIMP void set();
3969 
3975 
3976  _CONCRTIMP void reset();
3977 
4009 
// Waits on up to _Count events, all or any, with optional timeout.
4010  _CONCRTIMP static size_t __cdecl wait_for_multiple(_In_reads_(_Count) event ** _PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
4011 
4012 
4016  static const unsigned int timeout_infinite = COOPERATIVE_TIMEOUT_INFINITE;
4017 private:
4018 
4019  // Prevent bad usage of copy-constructor and copy-assignment
4020  event(const event& _Event);
4021  event& operator=(const event& _Event);
4022 
4023  void * volatile _M_pWaitChain;
4026 };
4027 
4028 namespace details
4029 {
4030  // Base class for all reference counted objects
// NOTE(review): the class-declaration line (presumably
// `class _RefCounterBase`) was lost in extraction. Intrusive refcount:
// _Reference/_Release use interlocked ops; the count reaching zero
// triggers the overridable _Destroy (default: delete this).
4032  {
4033  public:
4034 
// Virtual destructor body (signature line dropped by extraction).
4036  {
4038  }
4039 
4040  // Acquires a reference
4041  // Returns the new reference count.
4042  long _Reference()
4043  {
4044  long _Refcount = _InterlockedIncrement(&_M_refCount);
4045 
4046  // 0 - 1 transition is illegal
4047  _CONCRT_ASSERT(_Refcount > 1);
4048  return _Refcount;
4049  }
4050 
4051  // Releases the reference
4052  // Returns the new reference count
4053  long _Release()
4054  {
4055  long _Refcount = _InterlockedDecrement(&_M_refCount);
4056  _CONCRT_ASSERT(_Refcount >= 0);
4057 
4058  if (_Refcount == 0)
4059  {
4060  _Destroy();
4061  }
4062 
4063  return _Refcount;
4064  }
4065 
4066  protected:
4067 
4068  // Allow derived classes to provide their own deleter
4069  virtual void _Destroy()
4070  {
4071  delete this;
4072  }
4073 
4074  // Only allow instantiation through derived class
4075  _RefCounterBase(long _InitialCount = 1) : _M_refCount(_InitialCount)
4076  {
4078  }
4079 
4080  // Reference count
4081  volatile long _M_refCount;
4082  };
4083 
4086 
4087  // This is a non-reentrant lock wrapper around the ConcRT critical-section
4088  // and used by agents/messaging
// NOTE(review): class-declaration line lost in extraction (presumably
// `class _NonReentrantPPLLock`). Non-reentrant wrapper over the ConcRT
// critical section, used by agents/messaging; copy ctor and assignment
// are explicitly deleted.
4090  {
4091  public:
4092 
4093  // Constructor for _NonReentrantPPLLock
4095  _NonReentrantPPLLock(const _NonReentrantPPLLock&) = delete;
4096 
4097  _NonReentrantPPLLock& operator=(const _NonReentrantPPLLock&) = delete;
4098  // Acquire the lock, spin if necessary
4099  _CONCRTIMP void _Acquire(void * _Lock_node);
4100 
4101  // Releases the lock
4102  _CONCRTIMP void _Release();
4103 
4104  // An exception safe RAII wrapper.
// Non-copyable scoped holder: acquires in ctor, releases in dtor.
4106  {
4107  public:
4108  // Constructs a holder and acquires the specified lock
4109  _CONCRTIMP explicit _Scoped_lock(_NonReentrantPPLLock& _Lock);
4110 
4111  _Scoped_lock(const _Scoped_lock&) = delete;
4112  _Scoped_lock& operator=(const _Scoped_lock&) = delete;
4113 
4114  // Destroys the holder and releases the lock
4116 
4117  private:
4118  _NonReentrantPPLLock& _M_lock;
// Opaque storage for the waiter node passed to _Acquire.
4119  _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4120  };
4121 
4122  private:
4123  // critical_section
4125  };
4126 
4127  // This is a reentrant lock implemented using the ConcRT critical section
// NOTE(review): class-declaration line lost in extraction (presumably
// `class _ReentrantPPLLock`). Reentrant lock built on the ConcRT critical
// section: tracks a recursion count and the owning context id so the same
// owner may re-acquire.
4129  {
4130  public:
4131  // Constructor for _ReentrantPPLLock
4133  _ReentrantPPLLock(const _ReentrantPPLLock&) = delete;
4134 
4135  _ReentrantPPLLock& operator=(const _ReentrantPPLLock&) = delete;
4136 
4137  // Acquire the lock, spin if necessary
4138  _CONCRTIMP void _Acquire(void * _Lock_node);
4139 
4140  // Releases the lock
4141  _CONCRTIMP void _Release();
4142 
4143  // An exception safe RAII wrapper.
// Non-copyable scoped holder: acquires in ctor, releases in dtor.
4145  {
4146  public:
4147  // Constructs a holder and acquires the specified lock
4148  _CONCRTIMP explicit _Scoped_lock(_ReentrantPPLLock& _Lock);
4149 
4150  // Destroys the holder and releases the lock
4152 
4153  private:
4154  _ReentrantPPLLock& _M_lock;
4155  _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4156 
4157  _Scoped_lock(const _Scoped_lock&); // no copy constructor
4158  _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator
4159  };
4160 
4161  private:
4162  // critical_section
4164 
4165  // The number of times this lock has been taken recursively
4167 
4168  // The current owner of the lock
4169  volatile long _M_owner;
4170  };
4171 
// Smallest unit of schedulable work: wraps a TaskProc function pointer.
// Construction/destruction are protected -- only usable via derived
// types such as _UnrealizedChore below.
4172  struct _Chore
4173  {
4174  protected:
4175  // Constructors.
4176  explicit _Chore(TaskProc _PFunction) : m_pFunction(_PFunction)
4177  {
4178  }
4179 
// Default-constructor body (signature line dropped by extraction).
4181  {
4182  }
4183 
4184  virtual ~_Chore()
4185  {
4186  }
4187 
4188  public:
4189 
4190  // The function which invokes the work of the chore.
// NOTE(review): the `TaskProc m_pFunction;` declaration line appears to
// have been dropped by extraction (it is initialized in the ctor above).
4192  };
4193 
4194  // _UnrealizedChore represents an unrealized chore -- a unit of work that scheduled in a work
4195  // stealing capacity. Some higher level construct (language or library) will map atop this to provide
4196  // a usable abstraction to clients.
4194  // _UnrealizedChore represents an unrealized chore -- a unit of work that scheduled in a work
4195  // stealing capacity. Some higher level construct (language or library) will map atop this to provide
4196  // a usable abstraction to clients.
4197  class _UnrealizedChore : public _Chore, public _AllocBase
4198  {
4199  public:
4200  // Constructor for an unrealized chore.
// Constructor body (initializer-list/signature lines dropped by extraction).
4203  {
4204  }
4205  virtual ~_UnrealizedChore() {}
4206 
4207 
4208  // Method that executes the unrealized chore.
4209  void _Invoke()
4210  {
4211  _M_pChoreFunction(this);
4212  }
4213 
4214  // Sets the attachment state of the chore at the time of stealing.
4215  void _SetDetached(bool _FDetached);
4216 
4217  // Returns the owning collection of the chore.
// Accessor body below; return type/signature line dropped by extraction.
4219  {
4220  return _M_pTaskCollection;
4221  }
4222 
4223  // Set flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4224  // The flag is ignored by _StructuredTaskCollection
4225  void _SetRuntimeOwnsLifetime(bool _FValue)
4226  {
4227  _M_fRuntimeOwnsLifetime = _FValue;
4228  }
4229 
4230  // Returns the flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4231  // The flag is ignored by _StructuredTaskCollection
4233  {
4234  return _M_fRuntimeOwnsLifetime;
4235  }
4236 
4237  // Allocator to be used when runtime owns lifetime.
4238  template <typename _ChoreType, typename _Function>
4239  static _ChoreType * _InternalAlloc(const _Function& _Func)
4240  {
4241  // This is always invoked from the PPL layer by the user and can never be attached to the default scheduler. Therefore '_concrt_new' is not required here
4242  _ChoreType * _Chore = new _ChoreType(_Func);
4243  _Chore->_M_fRuntimeOwnsLifetime = true;
4244  return _Chore;
4245  }
4246 
4247  // Internal helper routine to prepare for execution as a stolen chore.
4248  void _PrepareSteal(ContextBase *_PContext);
4249 
4250  protected:
4251  // Invocation bridge between the _UnrealizedChore and PPL.
// Casts the opaque context back to the concrete chore type and invokes
// its call operator.
4252  template <typename _ChoreType>
4253  static void __cdecl _InvokeBridge(void * _PContext)
4254  {
4255  auto _PChore = static_cast<_ChoreType *>(_PContext);
4256  (*_PChore)();
4257  }
4258 
4259  // Place associated task collection in a safe state.
4261 
4262  private:
4263 
4265  friend class _TaskCollection;
4266  typedef void (__cdecl * CHOREFUNC)(_UnrealizedChore * _PChore);
4267 
4268  // The collection of work to which this particular chore belongs.
4270 
4271  // Internal invocation inside the scheduler.
4273 
4274  // Indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
4275  // This flag is ignored by _StructuredTaskCollection
4277 
4278  // An indication of whether the chore (if stolen) was detached.
4280 
4281  // Helper routines
4282  void _PrepareStealStructured(ContextBase *_PContext);
4283  void _PrepareStealUnstructured(ContextBase *_PContext);
4284 
4285  // The internal wrapper around invocation of stolen structured chores.
4286  __declspec(noinline)
4287  static void __cdecl _StructuredChoreWrapper(_UnrealizedChore * _PChore);
4288 
4289  // The internal wrapper around invocation of stolen unstructured chores.
4290  __declspec(noinline)
4291  static void __cdecl _UnstructuredChoreWrapper(_UnrealizedChore * _PChore);
4292 
4293  // To free memory allocated with _InternalAlloc.
4294  static void _InternalFree(_UnrealizedChore * _PChore);
4295 
4296  // Cancellation via token to a stolen chore
4297  static void __cdecl _CancelViaToken(::Concurrency::details::ContextBase *_PContext);
4298  };
4299 
4300  // Represents possible results of waiting on a task collection.
// NOTE(review): the enum-declaration line and its enumerators were lost
// in extraction; per the comment above this brace, this is the
// `_TaskCollectionStatus` enum (possible results of waiting on a task
// collection -- per the ConcRT sources: _NotComplete, _Completed,
// _Canceled; confirm against the original header).
4302  {
4306  };
4307 
4308  // _TaskCollectionBase represents an abstract set of work and provides shared waiting semantics for stolen work.
// NOTE(review): class-declaration line lost in extraction (presumably
// `class _TaskCollectionBase`) -- the abstract base for structured and
// unstructured task collections. Key encoding trick documented below:
// the low 2 bits of _M_pException carry the cancellation state machine,
// the rest is the std::exception_ptr pointer.
4310  {
4311  public:
4312  // Constructs a new task collection.
// Default-ctor initializer list (ctor name line dropped by extraction).
4314  _M_pTokenState(NULL),
4315  _M_completedStolenChores(_CollectionNotInitialized),
4316  _M_unpoppedChores(0),
4317  _M_pException(NULL),
4318  _M_inliningDepth(_S_notInlined)
4319  {
4320  }
4321  _TaskCollectionBase(const _TaskCollectionBase&) = delete;
4322 
4323  // Constructs a new task collection based on a given cancellation token.
// Token-taking ctor initializer list (ctor name line dropped by extraction).
4325  _M_pTokenState(_PTokenState),
4326  _M_completedStolenChores(_CollectionNotInitialized),
4327  _M_unpoppedChores(0),
4328  _M_pException(NULL),
4329  _M_inliningDepth(_S_notInlined)
4330  {
4331  }
4332 
4334 
4335  // Returns the owning context of the task collection.
4336  void * _OwningContext() const
4337  {
4338  return _M_pOwningContext;
4339  }
4340 
4341  // Returns the inlining depth.
4342  int _InliningDepth() const
4343  {
4344  return _M_inliningDepth;
4345  }
4346 
4347  // Tells if the task collection is inlined - some thread somewhere is currently invoking wait on it.
4348  bool _IsCurrentlyInlined() const
4349  {
4350  return (_M_inliningDepth != _S_notInlined);
4351  }
4352 
4353  // Returns whether this is a structured collection or not.
4355  {
4356  return (_M_inlineFlags & _S_structured) != 0;
4357  }
4358 
4359  // Returns the token state associated with this task collection
4360  _CancellationTokenState *_GetTokenState(_CancellationTokenRegistration **_PRegistration = NULL);
4361 
4362  protected:
4363 
4364  friend class ::Concurrency::details::_UnrealizedChore;
4365  friend class ::Concurrency::details::ContextBase;
4366 
// Sentinel values stored in _M_completedStolenChores during setup.
4368  {
4369  _CollectionNotInitialized = LONG_MIN,
4370  _CollectionInitializationInProgress = LONG_MIN+1,
4371  _CollectionInitialized = 0
4372  };
4373 
4374  // Returns the exception portion of _M_pException.
4375  std::exception_ptr * _Exception() const
4376  {
4377  return (std::exception_ptr *) ((size_t)_M_pException & ~_S_cancelBitsMask);
4378  }
4379 
4380  // Indicates whether or not this task collection has an abnormal exit.
4381  bool _IsAbnormalExit() const
4382  {
4383  return _M_pException != NULL;
4384  }
4385 
4386  // Returns the cancel flags.
4387  size_t _CancelState() const
4388  {
4389  return (size_t) _M_pException & _S_cancelBitsMask;
4390  }
4391 
4392  // Returns whether or not the collection is marked for cancellation.
4394  {
4395  return (_CancelState() & _S_cancelBitsMask) != 0;
4396  }
4397 
4398  // Returns whether an inline cancellation was performed.
4400  {
4401  _CONCRT_ASSERT(_CancelState() != _S_cancelStarted);
4402  return _CancelState() == _S_cancelShotdownOwner;
4403  }
4404 
// Returns whether a deferred inline cancellation was performed
// (signature line dropped by extraction).
4406  {
4407  _CONCRT_ASSERT(_CancelState() != _S_cancelStarted);
4408  return _CancelState() == _S_cancelDeferredShootdownOwner;
4409  }
4410 
4411  // Returns the parent collection safely.
4413  {
4414  return ((_M_inliningDepth != _S_notInlined) ? _M_pParent : NULL);
4415  }
4416 
4417  // Called in order to determine whether this task collection will interrupt for a pending cancellation at or above it.
4418  bool _WillInterruptForPendingCancel();
4419 
4420  // Called when an exception is raised on a chore on a given task collection, this makes a determination of what to do with the exception
4421  // and saves it for potential transport back to the thread performing a join on a chore collection.
4422  void _RaisedException();
4423 
4424  // Potentially rethrows the exception which was set with _RaisedException. The caller has responsibility to ensure that _RaisedException
4425  // was called prior to calling this and that _M_pException has progressed beyond the _S_nonNull state.
4426  void _RethrowException();
4427 
4428  // Marks the collection for cancellation and returns whether the collection was marked.
4429  bool _MarkCancellation();
4430 
4431  // Finishes the cancellation state (changing from _S_cancelStarted to one of the other states). Note that only the
4432  // thread which successfully marked cancellation can call this.
4433  void _FinishCancelState(size_t _NewCancelState);
4434 
4435  // Called when a cancellation is raised on a chore on a given task collection. This makes a determination of what to do with the exception
4436  // and saves it for potential transport back to the thread performing a join on a chore collection. Note that every other exception
4437  // has precedence over a cancellation.
4438  void _RaisedCancel();
4439 
4440  // Tracks the parent collection. (For example, A task collection B created during execution of a chore C on task collection A is
4441  // considered a child of A).
4443 
4444  // Tracks the inlining depth of this collection for cancellation purposes and packs a series of definition bits.
4445  int _M_inliningDepth : 28;
4446  int _M_inlineFlags : 4;
4447 
4448  // The cancellation token for the task collection.
4450 
4451  // The context which owns the task collection. This is the context where the collection is created.
4453 
4454  // The number of unpopped chores associated with the task collection (set by the derived
4455  // class during chore association.
4457 
4458  // The number of stolen chores executed so far.
4460 
4461  // The stored exception which has been marshaled from the thread a stolen chore ran upon to the thread that is waiting on the
4462  // task collection.
4463  //
4464  // The lower two bits of _M_pException are utilized for the cancellation state machine. The upper 30 are the exception pointer. This implies
4465  // that the exception pointer must be 4-byte aligned. Because of intermediate states, the exception pointer cannot be between 0x8 and 0xF. The heap should
4466  // not be allocating such...
4467  //
4468  std::exception_ptr * _M_pException;
4469 
4470  // Cancellation states
4471  static const size_t _S_cancelBitsMask = 0x3;
4472  static const size_t _S_cancelNone = 0x0;
4473  static const size_t _S_cancelStarted = 0x1;
4474  static const size_t _S_cancelDeferredShootdownOwner = 0x2;
4475  static const size_t _S_cancelShotdownOwner = 0x3;
4476 
4477  // Intermediate exceptions.
4478  static const size_t _S_nonNull = 0x8;
4479  static const size_t _S_cancelException = 0xC;
4480 
4481  // initialization state for inlining depth.
4482  static const int _S_notInlined = -1;
4483 
4484  // Inline flags.
4485  static const int _S_structured = 0x00000001;
4486  static const int _S_localCancel = 0x00000002;
4487  static const int _S_reserved = 0x0000000C;
4488  };
4489 
4494 
4496  {
4497  public:
4498 
4502 
4504  {
4505  _Construct();
4506  _M_pTokenState = NULL;
4507  }
4508 
4517 
4519 
4526 
4528 
4543 
4544  _CONCRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation);
4545 
4556 
4557  _CONCRTIMP void _Schedule(_UnrealizedChore * _PChore);
4558 
4562 
4563  _CONCRTIMP void _Cancel();
4564 
4575 
4576  _CONCRTIMP bool _IsCanceling();
4577 
4591 
4592  _CONCRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL);
4593 
4603 
4605  {
4606  return _RunAndWait();
4607  }
4608 
4612 
4613  void _CancelStolenContexts();
4614 
4615  private:
4616 
4617  friend class _UnrealizedChore;
4618 
4619  void _Construct()
4620  {
4621  _M_pOwningContext = NULL;
4622  _M_inlineFlags = _S_structured;
4623  }
4624 
4628 
4629  _CONCRTIMP void _Abort();
4630 
4634  _CONCRTIMP void _CleanupToken();
4635 
4639 
4641  {
4642  //
4643  // Users are required to call Wait() before letting the destructor run. Otherwise, throw. Note that before throwing,
4644  // we must actually wait on the tasks because they contain pointers into stack frames and unwinding without the wait is
4645  // instant stack corruption.
4646  //
4647  if (_M_unpoppedChores > 0)
4648  {
4649  _Abort();
4650 
4651  if (!__uncaught_exception())
4652  {
4653  return false;
4654  }
4655  }
4656 
4657  return true;
4658  }
4659 
4663 
4664  void _Initialize();
4665 
4672 
4673  void _WaitOnStolenChores(long _StolenChoreCount);
4674 
4678 
4679  void _CountUp();
4680 
4685 
4686  static void __cdecl _CancelViaToken(_StructuredTaskCollection *_PCollection);
4687 
4688  //
4689  // _StructuredTaskCollection::_M_event is used to construct a structured event object only when it is needed to block. The structured event object
4690  // has no state to clean up, therefore no dtor code is required.
4691  //
4692  _CONCRT_BUFFER _M_event[(sizeof(void*) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
4693  };
4694 
4701 
4703  {
4704  public:
4705 
4709 
4711  _TaskCollection(const _TaskCollection&) = delete;
4712 
4713  _TaskCollection& operator=(const _TaskCollection&) = delete;
4720 
4722 
4729 
4730  _CONCRTIMP ~_TaskCollection();
4731 
4746 
4747  _CONCRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation);
4748 
4759 
4760  _CONCRTIMP void _Schedule(_UnrealizedChore * _PChore);
4761 
4765 
4766  _CONCRTIMP void _Cancel();
4767 
4778 
4779  _CONCRTIMP bool _IsCanceling();
4780 
4795 
4796  _CONCRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL);
4797 
4808 
4810  {
4811  return _RunAndWait();
4812  }
4813 
4817 
4818  bool _IsMarkedForAbnormalExit() const;
4819 
4823 
4824  _TaskCollection * _OriginalCollection() const;
4825 
4829 
4830  bool _IsAlias() const;
4831 
4841 
4842  void _RegisterCompletionHandler(TaskProc _Func, void * _PCompletionContext);
4843 
4844  private:
4845 
4846  friend class _UnrealizedChore;
4847  friend class ::Concurrency::details::ContextBase;
4848 
4853 
4854  bool _IsStaleAlias() const;
4855 
4859 
4860  void _ReleaseAlias();
4861 
4871 
4872  _TaskCollection(_TaskCollection * _POriginCollection, bool _FDirectAlias);
4873 
4877 
4878  _TaskCollection * _Alias();
4879 
4886 
4887  void _Abort(bool _FLeaveCanceled = false);
4888 
4892 
4893  bool _IsIndirectAlias() const;
4894 
4898 
4899  bool _IsDirectAlias() const;
4900 
4904 
4905  bool _HasDirectAlias() const;
4906 
4917 
4918  void _Cancel(bool _InsideException, _TaskCollection * _PSnapPoint);
4919 
4923 
4924  void _NotifyNewChore();
4925 
4932 
4933  void _NotifyCompletedChoreAndFree(_UnrealizedChore * _PChore = NULL);
4934 
4941 
4942  void _FullAliasWait(_TaskCollection * _PSnapPoint);
4943 
4950 
4951  void _Reset(_TaskCollection * _PSnapPoint);
4952 
4958 
4959  void _RaisedException();
4960 
4966 
4967  void _RaisedCancel();
4968 
4978 
4979  bool _SetCancelState(long _Status);
4980 
4988 
4989  void _CancelFromArbitraryThread(bool _InsideException);
4990 
5001 
5002  void _CancelDirectAliases(bool _InsideException, _TaskCollection * _PSnapPoint);
5003 
5016 
5017  void _CancelStolenContexts(bool _InsideException, bool _FInlineGated);
5018 
5022 
5023  void *_GetStealTrackingList() const;
5024 
5028 
5029  void _Initialize();
5030 
5037 
5038  void _AbortiveSweep(void *_PCtx);
5039 
5052 
5053  static bool __cdecl _CollectionMatchPredicate(_UnrealizedChore *_PChore, void *_PData);
5054 
5067 
5068  static bool __cdecl _SweepAbortedChore(_UnrealizedChore *_PChore, void *_PData);
5069 
5076 
5077  bool _TaskCleanup(bool _FExceptional);
5078 
5082 
5083  static void __cdecl _CancelViaToken(_TaskCollection *_PCollection);
5084 
5089 
5090  _CONCRT_BUFFER _M_stealTracker[_SAFERWLIST_SIZE];
5091 
5096 
5098 
5103 
5104  volatile long _M_exitCode;
5105 
5109 
5110  volatile long _M_executionStatus;
5111 
5115 
5116  event _M_event;
5117 
5118  _TaskCollection * _M_pOriginalCollection;
5119  _TaskCollection * _M_pNextAlias;
5121 
5122  int _M_taskCookies[2];
5123 
5124  volatile long _M_flags;
5125  volatile long _M_chaining;
5126 
5129 
5132  };
5133 
5139  {
5140  public:
5141  _StackGuard() : _Depth(_GetCurrentInlineDepth())
5142  {
5143  // _Depth is the reference to the depth slot on context.
5144  ++_Depth;
5145  }
5146  _StackGuard(const _StackGuard&) = delete;
5147 
5148  _StackGuard& operator=(const _StackGuard&) = delete;
5149 
5151  {
5152  // _Depth is the reference to the depth slot on context.
5153  --_Depth;
5154  }
5155 
5156  bool _ShouldInline(_TaskInliningMode _InliningMode) const
5157  {
5158  // As _TaskInliningMode is defined as inlining threshold, we can directly convert
5159  // it into size_t, and compare with current context inlining depth.
5160  return _Depth <= static_cast<size_t>(_InliningMode);
5161  }
5162  private:
5163  size_t & _Depth;
5164 
5170  _CONCRTIMP static size_t & __cdecl _GetCurrentInlineDepth();
5171  };
5172 
5179  {
5180  public:
5181  _AsyncTaskCollection(const _AsyncTaskCollection&) = delete;
5192  _CONCRTIMP static _AsyncTaskCollection * __cdecl _NewCollection(_CancellationTokenState *_PTokenState);
5193 
5212  {
5213  _CONCRT_ASSERT(_PChore != nullptr);
5214  _Reference();
5215 
5216  if (_InliningMode == _NoInline)
5217  {
5218  _M_taskCollection._Schedule(_PChore);
5219  return _NotComplete;
5220  }
5221  else
5222  {
5223  _StackGuard _Guard;
5224  if (_Guard._ShouldInline(_InliningMode))
5225  {
5226  return _M_taskCollection._RunAndWait(_PChore);
5227  }
5228  else
5229  {
5230  _M_taskCollection._Schedule(_PChore);
5231  return _NotComplete;
5232  }
5233  }
5234  }
5235 
5239  void _Cancel()
5240  {
5241  _M_taskCollection._Cancel();
5242  }
5243 
5258  {
5259  // Note that _Guard is NOT an unused variable; the constructor and destructor will be called to maintain inline depth.
5260  _StackGuard _Guard;
5261  return _M_taskCollection._RunAndWait();
5262  }
5263 
5264  private:
5265 
5266  void _NotificationHandler();
5267 
5268  _CONCRTIMP virtual void _Destroy();
5269 
5270  // Private constructor
5272 
5273  __declspec(noinline)
5274  static void __cdecl _CompletionHandler(void * _PCompletionContext);
5275 
5276  private:
5277 
5278  // Underlying task collection where the chore is scheduled to run
5279  _TaskCollection _M_taskCollection;
5280  };
5281 
5286  {
5287  volatile long _M_signals;
5288  };
5289 
5300  {
5301  public:
5302 
5304 
5305  _CONCRTIMP ~_Cancellation_beacon();
5306 
5307  bool _Is_signaled() const
5308  {
5309  return (_M_pRef->_M_signals != 0);
5310  }
5311 
5312  // This method should only be called when the beacon is signaled. It confirms whether a cancellation is indeed happening and that the beacon
5313  // was not flagged due to a false positive race. If the cancellation is not confirmed, the beacon is lowered.
5314  _CONCRTIMP bool _Confirm_cancel();
5315 
5316  void _Raise()
5317  {
5318  _InterlockedIncrement(&_M_pRef->_M_signals);
5319  }
5320 
5321  void _Lower()
5322  {
5323  _InterlockedDecrement(&_M_pRef->_M_signals);
5324  }
5325 
5326  private:
5327 
5329 
5330  };
5331 
5332  //
5333  // Internal stub class.
5334  //
5335  class _TimerStub;
5336 
5337  //
5338  // Internal wrapper around timers in order to allow timer messaging blocks to share implementation with internal ConcRT runtime
5339  // timers.
5340  //
5341  class _Timer
5342  {
5343  protected:
5344  // Constructs a new timer.
5345  //
5346  // _Ms: The duration and period of the timer in milliseconds.
5347  // _FRepeating: An indication of whether the timer is repeating (periodic) or not.
5348  _CONCRTIMP _Timer(unsigned int _Ms, bool _FRepeating);
5349 
5350  // Destroys the timer.
5351  _CONCRTIMP virtual ~_Timer();
5352 
5353  // Starts the timer.
5354  _CONCRTIMP void _Start();
5355 
5356  // Stops the timer.
5357  _CONCRTIMP void _Stop();
5358 
5359  private:
5360  friend class _TimerStub;
5361 
5362  // Called when the timer fires.
5363  virtual void _Fire() =0;
5364 
5365  // The actual timer
5366  HANDLE _M_hTimer;
5367 
5368  // The duration and period of the timer.
5369  unsigned int _M_ms;
5370 
5371  // Whether the timer is repeating (periodic by _M_ms)
5373  };
5374 
5375  //
5376  // Internal runtime structure that holds the trace flags and level for ETW events
5377  // provided by the Concurrency Runtime.
5378  //
5380  {
5381  volatile unsigned long EnableFlags; // Determines which class of events to log
5382  volatile unsigned char EnableLevel; // Determines the severity of events to log
5383 
5384  void _EnableTrace(unsigned char _Level, unsigned long _Flags)
5385  {
5386  EnableFlags = _Flags;
5387  EnableLevel = _Level;
5388  }
5389 #pragma warning ( push )
5390 #pragma warning ( disable : 5393 ) // unreachable code
5392  {
5393  EnableLevel = 0;
5394  EnableFlags = 0;
5395  }
5396 #pragma warning ( pop )
5397 
5398  bool _IsEnabled(unsigned char _Level, unsigned long _Flags) const
5399  {
5400  return ((_Level <= EnableLevel) && ((EnableFlags & _Flags) == _Flags));
5401  }
5402  };
5403 
5408 
5409  _CONCRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo();
5410 
5414 
5416 
5420 
5422 
5423 } // namespace details
5424 
5425 
5432 
5433 __declspec(deprecated("Concurrency::EnableTracing is a deprecated function.")) _CONCRTIMP HRESULT __cdecl EnableTracing();
5434 
5442 
5443 __declspec(deprecated("Concurrency::DisableTracing is a deprecated function.")) _CONCRTIMP HRESULT __cdecl DisableTracing();
5444 
5448 
5450 {
5454 
5459 
5464 
5469 
5474 
5479 
5484 
5489 
5494 
5496 };
5497 
5498 // Common trace header structure for all ConcRT diagnostic events
5499 // struct CONCRT_TRACE_EVENT_HEADER_COMMON
5500 // {
5501 // EVENT_TRACE_HEADER header;
5502 // DWORD VirtualProcessorID;
5503 // DWORD SchedulerID;
5504 // DWORD ContextID;
5505 // DWORD ScheduleGroupID;
5506 // };
5507 
5511 
5512 _CONCRTIMP extern const GUID ConcRT_ProviderGuid;
5513 
5514 //
5515 // GUIDS for events
5516 //
5517 
5524 
5525 _CONCRTIMP extern const GUID ConcRTEventGuid;
5526 
5532 
5533 _CONCRTIMP extern const GUID SchedulerEventGuid;
5534 
5542 
5543 _CONCRTIMP extern const GUID ScheduleGroupEventGuid;
5544 
5549 
5550 _CONCRTIMP extern const GUID ContextEventGuid;
5551 
5560 
5561 _CONCRTIMP extern const GUID ChoreEventGuid;
5562 
5566 
5567 _CONCRTIMP extern const GUID VirtualProcessorEventGuid;
5568 
5577 
5578 _CONCRTIMP extern const GUID LockEventGuid;
5579 
5587 
5588 _CONCRTIMP extern const GUID ResourceManagerEventGuid;
5589 
5595 
5596 _CONCRTIMP extern const GUID PPLParallelInvokeEventGuid;
5597 
5603 
5604 _CONCRTIMP extern const GUID PPLParallelForEventGuid;
5605 
5611 
5612 _CONCRTIMP extern const GUID PPLParallelForeachEventGuid;
5613 
5617 
5618 _CONCRTIMP extern const GUID AgentEventGuid;
5619 
5620 // Trace an event signaling a parallel function
5621 _CONCRTIMP void __cdecl _Trace_ppl_function(const GUID& _Guid, unsigned char _Level, ConcRT_EventType _Type);
5622 
5626 
5628 {
5635 
5636  AllEventsFlag = 0xFFFFFFFF
5637 };
5638 
5642 
5644 {
5648 
5650 
5654 
5656 
5660 
5662 
5666 
5668 
5672 
5674 
5678 
5680 
5684 
5686 
5690 
5692 
5693 };
5694 
5695 // // Common trace payload for agents
5696 //
5697 // struct AGENTS_TRACE_PAYLOAD
5698 // {
5699 // // Identifier of the agent or message block that is emitting the event
5700 // __int64 AgentId1;
5701 // union
5702 // {
5703 // // The identifier of a target block for link/unlink event
5704 // __int64 AgentId2;
5705 //
5706 // // Count of messages processed for the end event
5707 // long Count;
5708 //
5709 // // Name of this agent for the purposes of the ETW trace
5710 // wchar_t Name[32];
5711 // };
5712 // };
5713 
5714 // Emit a trace event specific to the agents library of the given type and payload
5715 _CONCRTIMP void __cdecl _Trace_agents(Agents_EventType _Type, __int64 _AgentId, ...);
5716 }
5717 
5718 #ifndef _NO_DEFAULT_CONCRT_LIB
5719 
5720 #undef _DEBUG_AFFIX
5721 #undef _IDL_AFFIX
5722 #undef _IDL_DEFAULT
5723 #undef _LIB_STEM
5724 
5725 #ifdef _DEBUG
5726  #define _DEBUG_AFFIX "d"
5727  #define _IDL_DEFAULT 2
5728 #else
5729  #define _DEBUG_AFFIX ""
5730  #define _IDL_DEFAULT 0
5731 #endif /* _DEBUG */
5732 
5733 #if defined(_DLL) && !defined(_STATIC_CPPLIB)
5734  #define _LIB_STEM "concrt"
5735 #else /* defined(_DLL) && !defined(_STATIC_CPPLIB) */
5736  #define _LIB_STEM "libconcrt"
5737  #if _ITERATOR_DEBUG_LEVEL != _IDL_DEFAULT
5738  #define _IDL_AFFIX _CRT_STRINGIZE(_ITERATOR_DEBUG_LEVEL)
5739  #endif /* _ITERATOR_DEBUG_LEVEL != _IDL_DEFAULT */
5740 #endif /* defined(_DLL) && !defined(_STATIC_CPPLIB) */
5741 
5742 #ifndef _IDL_AFFIX
5743  #define _IDL_AFFIX ""
5744 #endif /* _IDL_AFFIX */
5745 
5746 #pragma comment(lib, _LIB_STEM _DEBUG_AFFIX _IDL_AFFIX)
5747 
5748 #undef _DEBUG_AFFIX
5749 #undef _IDL_AFFIX
5750 #undef _IDL_DEFAULT
5751 #undef _LIB_STEM
5752 
5753 #endif /* _NO_DEFAULT_CONCRT_LIB */
5754 
5755 namespace concurrency = ::Concurrency;
5756 
5757 #pragma pop_macro("_YieldProcessor")
5758 #pragma pop_macro("new")
5759 #pragma pack(pop)
friend class _TaskCollection
Definition: concrt.h:4265
DWORD _M_boundQueueId
Definition: concrt.h:5127
_Scoped_lock const & operator=(const _Scoped_lock &)
void operator=(const _SpinLock &)
_CONCRTIMP const GUID VirtualProcessorEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to v...
A cancellation beacon is a flag which can be polled in an inlinable fashion using the is_signaled met...
Definition: concrt.h:5299
std::exception_ptr * _Exception() const
Definition: concrt.h:4375
int _InliningDepth() const
Definition: concrt.h:4342
_CONCRTIMP void __cdecl wait(unsigned int _Milliseconds)
Pauses the current context for a specified amount of time.
Definition: concrt.h:364
void _Raise()
Definition: concrt.h:5316
_CONCRTIMP scheduler_resource_allocation_error(_In_z_ const char *_Message, HRESULT _Hresult)
Constructs a scheduler_resource_allocation_error object.
void _PrepareStealStructured(ContextBase *_PContext)
void _Assign(const location &_Rhs)
Assigns _Rhs to this location.
Definition: concrt.h:1999
_CONCRTIMP const GUID ContextEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to c...
void * _M_pReaderHead
Definition: concrt.h:3897
void _AddNode(_ElemType *_Elem)
Definition: concrt.h:1114
bool _ShouldInline(_TaskInliningMode _InliningMode) const
Definition: concrt.h:5156
Definition: functional:68
long _Reference()
Definition: concrt.h:4042
This class describes an exception thrown when an attempt is made to set the concurrency limits of a S...
Definition: concrt.h:1655
_Variant_copymove_layer_ & operator=(_Variant_copymove_layer_ &&_That) _NOEXCEPT_OP((conjunction< is_nothrow_move_constructible< _Types >...
_ReaderWriterLock & _M_lock
Definition: concrt.h:1005
_YieldFunction _M_yieldFunction
Definition: concrt.h:756
Agents_EventType
The types of events that can be traced using the tracing functionality offered by the Agents Library ...
Definition: concrt.h:5643
_SpinWait(_YieldFunction _YieldMethod=_UnderlyingYield)
Construct a spin wait object
Definition: concrt.h:588
static _CONCRTIMP void __cdecl _Yield()
An event type used for miscellaneous events.
Definition: concrt.h:5455
static void __cdecl _Initialize()
Indicates that the location represents a particular NUMA node.
Definition: concrt.h:1950
An event type that marks the beginning of a start/end event pair.
Definition: concrt.h:5465
size_t _M_ElemsConstructed
Definition: concrt.h:1089
_NonReentrantPPLLock & _M_lock
Definition: concrt.h:4118
_CONCRTIMP bool try_lock_for(unsigned int _Timeout)
Tries to acquire the lock without blocking for a specific number of milliseconds. ...
_CONCRTIMP ~scoped_lock_read()
Destroys a scoped_lock_read object and releases the lock supplied in its constructor.
_CONCRTIMP event()
Constructs a new event.
Definition: concrt.h:529
critical_section & native_handle_type
A reference to a critical_section object.
Definition: concrt.h:3557
Structured task collections represent groups of work which follow a strictly LIFO ordered paradigm qu...
Definition: concrt.h:4495
void * _M_pOwningContext
Definition: concrt.h:4452
Async Task collections is a thin wrapper over task collection to cater to the execution of asynchrono...
Definition: concrt.h:5178
This class describes an exception thrown when a lock is acquired improperly.
Definition: concrt.h:1801
long _M_activeStealersForCancellation
A count of active stealers for CANCELLATION PURPOSES ONLY. This is non-interlocked and guarded by the...
Definition: concrt.h:5097
::Concurrency::critical_section _M_criticalSection
Definition: concrt.h:4163
An event type that represents the linking of message blocks
Definition: concrt.h:5679
This class describes an exception thrown when an invalid or unknown key is passed to a SchedulerPolic...
Definition: concrt.h:1599
static _Ty _LoadWithAquire(volatile _Ty &_Location)
Definition: concrt.h:413
void _Construct()
Definition: concrt.h:4619
Definition: concrt.h:376
An event type that represents the unlinking of message blocks
Definition: concrt.h:5685
static _CONCRTIMP void __cdecl _ScheduleTask(TaskProc _Proc, void *_Data)
volatile long _M_owner
Definition: concrt.h:4169
An event type that represents the act of a attaching to a scheduler.
Definition: concrt.h:5490
_CONCRTIMP void _Acquire(void *_Lock_node)
void _DisableTrace()
Definition: concrt.h:5391
bool _IsEnabled(unsigned char _Level, unsigned long _Flags) const
Definition: concrt.h:5398
Implements busy wait with no backoff
Definition: concrt.h:578
bool operator!=(const location &_Rhs) const
Determines whether two location objects represent different location.
Definition: concrt.h:1919
TaskProc m_pFunction
Definition: concrt.h:4191
#define _CONCRT_ASSERT(x)
Definition: concrt.h:123
Definition: concrt.h:4304
_CONCRTIMP void __cdecl _Trace_ppl_function(const GUID &_Guid, unsigned char _Level, ConcRT_EventType _Type)
_TaskCollectionBase * _M_pParent
Definition: concrt.h:4442
_Ty _FetchAndAdd(_Ty _Addend)
Definition: concrt.h:501
_TaskCollectionBase(_CancellationTokenState *_PTokenState)
Definition: concrt.h:4324
bool _IsMarkedForCancellation() const
Definition: concrt.h:4393
unsigned int _M_ms
Definition: concrt.h:5369
_CONCRTIMP unsigned int _Release()
Definition: concrt.h:4031
_CONCRTIMP void lock()
Acquires the reader-writer lock as a writer.
_CONCRTIMP scoped_lock_read(reader_writer_lock &_Reader_writer_lock)
Constructs a scoped_lock_read object and acquires the reader_writer_lock object passed in the _Reader...
_UnrealizedChore()
Definition: concrt.h:4201
unsigned int size_t
Definition: sourceannotations.h:19
Indicates that the location represents a particular scheduling node.
Definition: concrt.h:1955
This class describes an exception thrown when the Reference method is called on a Scheduler object th...
Definition: concrt.h:1370
_Ty operator=(_Ty _Rhs)
Definition: concrt.h:492
Definition: concrt.h:5633
void(__cdecl * CHOREFUNC)(_UnrealizedChore *_PChore)
Definition: concrt.h:4266
void *volatile _M_pHead
Definition: concrt.h:3645
bool _HasWriteLock() const
Definition: concrt.h:955
_CONCRTIMP const GUID PPLParallelForEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to u...
volatile long & _M_flag
Definition: concrt.h:532
static void __cdecl _WaitEquals(volatile const long &_Location, long _Value, long _Mask=0xFFFFFFFF)
void _Unlock_reader()
Called from unlock() when a reader is holding the lock. Reader count is decremented and if this is th...
_CONCRTIMP scoped_lock(reader_writer_lock &_Reader_writer_lock)
Constructs a scoped_lock object and acquires the reader_writer_lock object passed in the _Reader_writ...
_In_ long
Definition: corecrt_wstdlib.h:88
_TaskCollectionStatus _RunAndWait()
A cancellation friendly wrapper with which to execute _PChore and then waits for all chores running i...
Definition: concrt.h:5257
CHOREFUNC _M_pChoreFunction
Definition: concrt.h:4272
This class describes an exception thrown when calls to the Block and Unblock methods of a Context obj...
Definition: concrt.h:1429
This class describes an exception thrown when a messaging block is given a pointer to a target which ...
Definition: concrt.h:1520
void(__cdecl * TaskProc)(void *)
Concurrency::details contains definitions of support routines in the public namespaces and one or mor...
Definition: concrt.h:251
_CONCRT_BUFFER _M_criticalSection[(4 *sizeof(void *)+2 *sizeof(long)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:812
scoped_lock const & operator=(const scoped_lock &)
size_t _CancelState() const
Definition: concrt.h:4387
An event type that represents the creation of an object
Definition: concrt.h:5649
virtual ~_RefCounterBase()
Definition: concrt.h:4035
RAII wrapper used to maintain and limit ppltask maximum inline schedule depth. This class will keep a...
Definition: concrt.h:5138
long _M_unpoppedChores
Definition: concrt.h:4456
_CONCRTIMP scheduler_worker_creation_error(_In_z_ const char *_Message, HRESULT _Hresult)
Constructs a scheduler_worker_creation_error object.
ConcRT_EventType
The types of events that can be traced using the tracing functionality offered by the Concurrency Run...
Definition: concrt.h:5449
void * _M_pTaskExtension
Definition: concrt.h:5120
_CONCRTIMP const GUID PPLParallelInvokeEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to u...
_TaskCollectionStatus _ScheduleWithAutoInline(_UnrealizedChore *_PChore, _TaskInliningMode _InliningMode)
Schedule a chore with automatic inlining. The chore is pushed onto the associated workstealing queue...
Definition: concrt.h:5211
An event type that represents the act of unblocking a context.
Definition: concrt.h:5475
Definition: concrt.h:4305
long _M_recursionCount
Definition: concrt.h:859
_Scoped_lock(_NonReentrantBlockingLock &_Lock)
Definition: concrt.h:895
critical_section & _M_critical_section
Definition: concrt.h:3625
volatile unsigned long EnableFlags
Definition: concrt.h:5381
_SpinWait< 0 > _SpinWaitNoYield
Definition: concrt.h:760
#define _Post_invalid_
Definition: sal.h:692
Concrt_TraceFlags
Trace flags for the event types
Definition: concrt.h:5627
void _IncrementConstructedElemsCount()
Definition: concrt.h:1072
~location()
Destroys a location object.
Definition: concrt.h:1896
volatile long _M_signals
Definition: concrt.h:5287
_CONCRTIMP const GUID ConcRT_ProviderGuid
The ETW provider GUID for the Concurrency Runtime.
_MallocaListHolder & operator=(const _MallocaListHolder &)
virtual ~_Chore()
Definition: concrt.h:4184
_ElemNodeType & operator=(const _ElemNodeType &)
_At_(this->_M_FirstNode, _Pre_valid_) virtual ~_MallocaListHolder()
Definition: concrt.h:1133
Definition: concrt.h:5632
_CONCRTIMP native_handle_type native_handle()
Returns a platform specific native handle, if one exists.
_CONCRTIMP scheduler_not_attached()
Constructs a scheduler_not_attached object.
_CONCRTIMP void *__cdecl Alloc(size_t _NumBytes)
Allocates a block of memory of the size specified from the Concurrency Runtime Caching Suballocator...
HRESULT _Hresult
Definition: concrt.h:1222
long _M_recursionCount
Definition: concrt.h:4166
static _Ty _Decrement(volatile _Ty &_Location)
Definition: concrt.h:435
_CONCRTIMP scoped_lock(critical_section &_Critical_section)
Constructs a scoped_lock object and acquires the critical_section object passed in the _Critical_sect...
_CONCRTIMP improper_scheduler_detach()
Constructs an improper_scheduler_detach object.
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:43
Definition: concrt.h:4172
__inline void __CRTDECL _freea(_Pre_maybenull_ _Post_invalid_ void *_Memory)
Definition: malloc.h:147
#define _Pre_valid_
Definition: sal.h:668
location(const location &_Src)
Constructs a location object.
Definition: concrt.h:1849
_CRT_BEGIN_C_HEADER _Check_return_ _Ret_maybenull_ _In_ size_t _Size
Definition: corecrt_malloc.h:58
_ElemType * _InitOnRawMalloca(void *_MallocaRet)
Definition: concrt.h:1062
_CONCRTIMP ~event()
Destroys an event.
static _CONCRTIMP unsigned int __cdecl _Value()
_CONCRT_BUFFER _M_criticalSection[(4 *sizeof(void *)+2 *sizeof(long)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:915
A writer-preference queue-based reader-writer lock with local only spinning. The lock grants first in...
Definition: concrt.h:3670
_CONCRTIMP reader_writer_lock()
Constructs a new reader_writer_lock object.
volatile long _M_exitCode
An indication of the exit code of the chore. Anything non-zero here indicates cancellation of one for...
Definition: concrt.h:5104
void _InternalDeleteHelper(_Ty *_PObject)
Definition: concrt.h:272
This class describes an exception thrown because of a failure to acquire a critical resource in the C...
Definition: concrt.h:1188
::Concurrency::Scheduler * _M_pScheduler
Definition: concrt.h:385
Definition: concrt.h:820
bool _PerformedInlineCancel() const
Definition: concrt.h:4399
_MallocaArrayHolder()
Definition: concrt.h:1048
_CONCRTIMP const GUID ConcRTEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are not more specifically...
void _Lower()
Definition: concrt.h:5321
scoped_lock_read const & operator=(const scoped_lock_read &)
_In_ size_t _In_ int _Index
Definition: time.h:102
volatile long _M_flags
Definition: concrt.h:5124
unsigned __int64 * PDWORD_PTR
Definition: concrt.h:103
Indicates that the location represents a particular execution resource.
Definition: concrt.h:1960
_CONCRTIMP const GUID PPLParallelForeachEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to u...
Definition: pplinterface.h:224
_SpinWait _SpinWaitBackoffNone
Definition: concrt.h:759
void _PrepareStealUnstructured(ContextBase *_PContext)
An event type that represents the name for an object
Definition: concrt.h:5691
void *volatile _M_pTail
Definition: concrt.h:3646
unsigned int _M_reserved
Definition: concrt.h:2062
Definition: concrt.h:483
_CONCRTIMP void __cdecl _Trace_agents(Agents_EventType _Type, __int64 _AgentId,...)
_TaskCollectionStatus _Wait()
Waits for all chores running in the _StructuredTaskCollection to finish (normally or abnormally)...
Definition: concrt.h:4604
static _CONCRTIMP _Context __cdecl _CurrentContext()
void * _M_pCompletionContext
Definition: concrt.h:5131
_CONCRTIMP context_unblock_unbalanced()
Constructs a context_unblock_unbalanced object.
_NonReentrantPPLLock & operator=(const _NonReentrantPPLLock &)=delete
_CONCRTIMP bool try_lock_read()
Attempts to acquire the reader-writer lock as a reader without blocking.
_CONCRTIMP void unlock()
Unlocks the reader-writer lock based on who locked it, reader or writer.
_CONCRTIMP invalid_oversubscribe_operation()
Constructs an invalid_oversubscribe_operation object.
_CONCRT_BUFFER _M_node[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3626
A non-reentrant mutex which is explicitly aware of the Concurrency Runtime.
Definition: concrt.h:3488
_TaskCollectionStatus _Wait()
Waits for all chores running in the _TaskCollection to finish (normally or abnormally). This method encapsulates all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _TaskCollection was created, and re-thrown). After this function returns, the _TaskCollection cannot be used for scheduling further work.
Definition: concrt.h:4809
void _Switch_to_active(void *_PWriter)
The writer node allocated on the stack never really owns the lock because it would go out of scope an...
location & operator=(const location &_Rhs)
Assigns the contents of a different location object to this one.
Definition: concrt.h:1886
static _CONCRTIMP unsigned int __cdecl _Id()
_ReentrantPPLLock & operator=(const _ReentrantPPLLock &)=delete
std::exception_ptr * _M_pException
Definition: concrt.h:4468
bool _SpinOnce()
Spins for one time quantum,until a maximum spin is reached.
Definition: concrt.h:626
__declspec(noinline) static void __cdecl _StructuredChoreWrapper(_UnrealizedChore *_PChore)
_GROUP_AFFINITY * PGROUP_AFFINITY
Definition: concrt.h:58
_CONCRT_BUFFER _M_lockNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:4155
int _M_stackPos
Definition: concrt.h:5128
bool _M_fRepeating
Definition: concrt.h:5372
_CONCRTIMP context_self_unblock()
Constructs a context_self_unblock object.
_CONCRTIMP _Scheduler(::Concurrency::Scheduler *_PScheduler=NULL)
Definition: concrt.h:379
void _Reset()
Resets the counts and state to the default.
Definition: concrt.h:718
_CONCRTIMP improper_scheduler_reference()
Constructs an improper_scheduler_reference object.
#define _Pre_maybenull_
Definition: sal.h:678
static void __cdecl _CancelViaToken(::Concurrency::details::ContextBase *_PContext)
An event type that represents the act of a detaching from a scheduler.
Definition: concrt.h:5495
void * _CONCRT_BUFFER
Definition: concrt.h:129
_Scoped_lock(_ReaderWriterLock &_Lock)
Definition: concrt.h:968
_CONCRTIMP void reset()
Resets the event to a non-signaled state.
void _YieldProcessor()
Definition: concrt.h:80
_Scoped_lock(_ReentrantBlockingLock &_Lock)
Definition: concrt.h:792
An event type that represents the conclusion of some processing
Definition: concrt.h:5661
#define _In_z_
Definition: sal.h:310
bool _IsCurrentlyInlined() const
Definition: concrt.h:4348
Definition: concrt.h:5629
_In_ wctype_t _Type
Definition: corecrt_wctype.h:111
void _Switch_to_active(void *_PLockingNode)
The node allocated on the stack never really owns the lock because it would go out of scope and the i...
_Beacon_reference * _M_pRef
Definition: concrt.h:5328
This class describes an exception thrown when the Attach method is called on a Scheduler object which...
Definition: concrt.h:1316
static _Ty _CompareAndSwap(volatile _Ty &_Location, _Ty _NewValue, _Ty _Comperand)
Definition: concrt.h:420
#define _Inout_opt_
Definition: sal.h:376
This class describes an exception thrown when an operation is performed which requires a scheduler to...
Definition: concrt.h:1290
void _PrepareSteal(ContextBase *_PContext)
This class describes an exception thrown when an operation has timed out.
Definition: concrt.h:1712
This class describes an exception thrown when there are tasks still scheduled to a task_group or stru...
Definition: concrt.h:1491
HANDLE _M_hTimer
Definition: concrt.h:5366
_TaskInliningMode
The enum defines inlining scheduling policy for ppltasks. Scheduling a chore or a functor with _TaskI...
Definition: pplinterface.h:221
_SpinState _M_state
Definition: concrt.h:755
bool _FastNodeIntersects(const location &_Rhs) const
Determines whether two locations have an intersection. This is a fast intersection which avoids certa...
_Ty operator++(int)
Definition: concrt.h:509
_CONCRTIMP const GUID ChoreEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to c...
_CONCRTIMP invalid_scheduler_policy_key()
Constructs an invalid_scheduler_policy_key object.
static unsigned int _S_spinCount
Definition: concrt.h:557
_CONCRTIMP _Context(::Concurrency::Context *_PContext=NULL)
Definition: concrt.h:367
This class describes an exception thrown when a task_handle object is scheduled multiple times using ...
Definition: concrt.h:1746
bool _Is_signaled() const
Definition: concrt.h:5307
_CONCRTIMP void __cdecl Free(_Pre_maybenull_ _Post_invalid_ void *_PAllocation)
Releases a block of memory previously allocated by the Alloc method to the Concurrency Runtime Cachin...
critical_section & operator=(const critical_section &)
Hide assignment operator for a critical section
_ElemNodeType * _M_Next
Definition: concrt.h:1150
#define _SAFERWLIST_SIZE
Definition: concrt.h:131
Definition: concrt.h:4303
friend class _StructuredTaskCollection
Definition: concrt.h:4264
_Ty * _As() const
Returns the internal binding as a specified object.
Definition: concrt.h:2024
_CONCRTIMP void _Acquire(void *_Lock_node)
bool _FastVPIntersects(const location &_Rhs) const
Determines whether two locations have an intersection. This is a fast intersection which avoids certa...
unsigned long _M_currentYield
Definition: concrt.h:754
void _RegisterConcRTEventTracing()
Register ConcRT as an ETW Event Provider.
size_t _GetAllocationSize() const
Definition: concrt.h:1104
reader_writer_lock & _M_reader_writer_lock
Definition: concrt.h:3795
_CONCRTIMP nested_scheduler_missing_detach()
Constructs a nested_scheduler_missing_detach object.
void _Remove_last_writer(void *_PWriter)
When the last writer leaves the lock, it needs to reset the tail to NULL so that the next coming writ...
_SECURITY_ATTRIBUTES * LPSECURITY_ATTRIBUTES
Definition: concrt.h:55
_NonReentrantBlockingLock & _M_lock
Definition: concrt.h:906
bool _TaskCleanup()
Performs task cleanup normally done at destruction time.
Definition: concrt.h:4640
bool _ShouldSpinAgain()
Determines whether maximum spin has been reached
Definition: concrt.h:748
This class describes an exception thrown when a messaging block is unable to find a requested message...
Definition: concrt.h:1544
struct _Meta_at_< _Meta_list< _Types...>, _Idx, enable_if_t<(_Idx< sizeof...(_Types))> >:decltype(_Meta_at_impl< _Meta_repeat_n_c< _Idx, void * >>::_Eval(_Type_as_pointer< _Types >()...)){};template< class _List, class _Ty > struct _Meta_find_index_{};template< class _List, class _Ty > using _Meta_find_index=typename _Meta_find_index_< _List, _Ty >::type;constexpr auto _Meta_npos=static_cast< size_t >(-1);constexpr size_t _Meta_find_index_i_(bool const *const _Ptr, const size_t _Count, const size_t _Idx=0){return(_Idx >=_Count?_Meta_npos:_Ptr[_Idx]?_Idx:_Meta_find_index_i_(_Ptr, _Count, _Idx+1));}template< class _Ty > struct _Meta_find_index_< _Meta_list<>, _Ty >:integral_constant< size_t, _Meta_npos >{};template< class..._Types, class _Ty > struct _Meta_find_index_< _Meta_list< _Types...>, _Ty >{static constexpr bool _Bools[]={is_same< _Types, _Ty >::value...};using type=integral_constant< size_t, _Meta_find_index_i_(_Bools, sizeof...(_Types))>;};template< class _List, class _Ty > struct _Meta_find_unique_index_{};template< class _List, class _Ty > using _Meta_find_unique_index=typename _Meta_find_unique_index_< _List, _Ty >::type;constexpr size_t _Meta_find_unique_index_i_2(bool const *const _Ptr, const size_t _Count, const size_t _First){return(_First!=_Meta_npos &&_Meta_find_index_i_(_Ptr, _Count, _First+1)==_Meta_npos?_First:_Meta_npos);}constexpr size_t _Meta_find_unique_index_i_(bool const *const _Ptr, const size_t _Count){return(_Meta_find_unique_index_i_2(_Ptr, _Count, _Meta_find_index_i_(_Ptr, _Count)));}template< class _Ty > struct _Meta_find_unique_index_< _Meta_list<>, _Ty >:integral_constant< size_t, _Meta_npos >{};template< class..._Types, class _Ty > struct _Meta_find_unique_index_< _Meta_list< _Types...>, _Ty >{using type=integral_constant< size_t, _Meta_find_unique_index_i_(_Meta_find_index_< _Meta_list< _Types...>, _Ty >::_Bools, sizeof...(_Types))>;};template< class > struct _Meta_as_list_{};template< class _Ty > using _Meta_as_list=typename 
_Meta_as_list_< _Ty >::type;template< template< class > class _Template, class..._Types > struct _Meta_as_list_< _Template< _Types...> >{using type=_Meta_list< _Types...>;};template< class _Ty, _Ty..._Idxs > struct _Meta_as_list_< integer_sequence< _Ty, _Idxs...> >{using type=_Meta_list< integral_constant< _Ty, _Idxs >...>;};template< class _List > struct _Meta_as_integer_sequence_{};template< class _List > using _Meta_as_integer_sequence=typename _Meta_as_integer_sequence_< _List >::type;template< class _Ty, _Ty..._Idxs > struct _Meta_as_integer_sequence_< _Meta_list< integral_constant< _Ty, _Idxs >...> >{using type=integer_sequence< _Ty, _Idxs...>;};template< class...> struct _Meta_concat_{};template< class..._Types > using _Meta_concat=typename _Meta_concat_< _Types...>::type;template<> struct _Meta_concat_< _Meta_list<> >{using type=_Meta_list<>;};template< class..._Items1 > struct _Meta_concat_< _Meta_list< _Items1...> >{using type=_Meta_list< _Items1...>;};template< class..._Items1, class..._Items2 > struct _Meta_concat_< _Meta_list< _Items1...>, _Meta_list< _Items2...> >{using type=_Meta_list< _Items1..., _Items2...>;};template< class..._Items1, class..._Items2, class..._Items3 > struct _Meta_concat_< _Meta_list< _Items1...>, _Meta_list< _Items2...>, _Meta_list< _Items3...> >{using type=_Meta_list< _Items1..., _Items2..., _Items3...>;};template< class..._Items1, class..._Items2, class..._Items3, class..._Rest > struct _Meta_concat_< _Meta_list< _Items1...>, _Meta_list< _Items2...>, _Meta_list< _Items3...>, _Rest...>{using type=_Meta_concat< _Meta_list< _Items1..., _Items2..., _Items3...>, _Rest...>;};template< class _ListOfLists > using _Meta_join=_Meta_apply< _Meta_quote< _Meta_concat >, _ListOfLists >;template< class > struct _Meta_cartesian_product_{};template< class _ListOfLists > using _Meta_cartesian_product=typename _Meta_cartesian_product_< _ListOfLists >::type;template<> struct _Meta_cartesian_product_< _Meta_list<> >{using 
type=_Meta_list<>;};template< class..._Items > struct _Meta_cartesian_product_< _Meta_list< _Meta_list< _Items...> > >{using type=_Meta_list< _Meta_list< _Items >...>;};template< class..._Items, class..._Lists > struct _Meta_cartesian_product_< _Meta_list< _Meta_list< _Items...>, _Lists...> >{using type=_Meta_join< _Meta_list< _Meta_transform< _Meta_bind_back< _Meta_quote< _Meta_push_front >, _Items >, _Meta_cartesian_product< _Meta_list< _Lists...> > >...> >;};template< class..._Types > class variant;template< class _Ty > struct variant_size;template< class _Ty > struct variant_size< const _Ty >:variant_size< _Ty >::type{};template< class _Ty > struct variant_size< volatile _Ty >:variant_size< _Ty >::type{};template< class _Ty > struct variant_size< const volatile _Ty >:variant_size< _Ty >::type{};template< class _Ty > constexpr size_t variant_size_v=variant_size< _Ty >::value;template< class..._Types > struct variant_size< variant< _Types...> >:integral_constant< size_t, sizeof...(_Types)>{};template< size_t _Idx, class _Ty > struct variant_alternative;template< size_t _Idx, class _Ty > using variant_alternative_t=typename variant_alternative< _Idx, _Ty >::type;template< size_t _Idx, class _Ty > struct variant_alternative< _Idx, const _Ty >{using type=add_const_t< variant_alternative_t< _Idx, _Ty > >;};template< size_t _Idx, class _Ty > struct variant_alternative< _Idx, volatile _Ty >{using type=add_volatile_t< variant_alternative_t< _Idx, _Ty > >;};template< size_t _Idx, class _Ty > struct variant_alternative< _Idx, const volatile _Ty >{using type=add_cv_t< variant_alternative_t< _Idx, _Ty > >;};template< size_t _Idx, class..._Types > struct variant_alternative< _Idx, variant< _Types...> >{using type=_Meta_at_c< _Meta_list< _Types...>, _Idx >;};constexpr auto variant_npos=_Meta_npos;class bad_variant_access:public exception{public:bad_variant_access() _NOEXCEPT=default;virtual const char *__CLR_OR_THIS_CALL what() const _NOEXCEPT override{return("bad variant 
access");}protected:virtual void _Doraise() const {_RAISE(*this);}};struct _Variant_uses_allocator_t{};template< class _Ty, class=void > struct _Variant_item{remove_cv_t< _Ty > _Elem;template< class..._Types >constexpr explicit _Variant_item(_Types &&..._Args):_Elem(_STD forward< _Types >(_Args)...){}template< class _Alloc, class..._Types, enable_if_t< uses_allocator< _Ty, _Alloc >::value &&is_constructible< _Ty, allocator_arg_t, const _Alloc &, _Types...>::value, int >=0 >constexpr _Variant_item(_Variant_uses_allocator_t, const _Alloc &_Al, _Types &&..._Args):_Elem(allocator_arg, _Al, _STD forward< _Types >(_Args)...){}template< class _Alloc, class..._Types, enable_if_t< uses_allocator< _Ty, _Alloc >::value &&!is_constructible< _Ty, allocator_arg_t, const _Alloc &, _Types...>::value, int >=0 >constexpr _Variant_item(_Variant_uses_allocator_t, const _Alloc &_Al, _Types &&..._Args):_Elem(_STD forward< _Types >(_Args)..., _Al){}template< class _Alloc, class..._Types, enable_if_t<!uses_allocator< _Ty, _Alloc >::value, int >=0 >constexpr _Variant_item(_Variant_uses_allocator_t, const _Alloc &, _Types &&..._Args):_Elem(_STD forward< _Types >(_Args)...){}_CONSTEXPR14 _Ty &get()&{return(_Elem);}constexpr const _Ty &get() const &{return(_Elem);}_CONSTEXPR14 _Ty &&get()&&{return(_STD move(_Elem));}constexpr const _Ty &&get() const &&{return(_STD move(_Elem));}};template< bool _TrivialDestruction, class..._Types > class _Variant_storage_{};template< class..._Types > using _Variant_storage=_Variant_storage_< conjunction< is_trivially_destructible< _Variant_item< _Types > >...>::value, _Types...>;template< class _First, class..._Rest > class _Variant_storage_< true, _First, _Rest...>{public:static constexpr size_t _Size=1+sizeof...(_Rest);union{_Variant_item< _First > _Head;_Variant_storage< _Rest...> _Tail;};_Variant_storage_() _NOEXCEPT{}template< class..._Types >constexpr explicit _Variant_storage_(integral_constant< size_t, 0 >, _Types &&..._Args):_Head(_STD forward< 
_Types >(_Args)...){}template< size_t _Idx, class..._Types >constexpr explicit _Variant_storage_(integral_constant< size_t, _Idx >, _Types &&..._Args) _NOEXCEPT_OP((is_nothrow_constructible< _Variant_storage< _Rest...>, integral_constant< size_t, _Idx-1 >, _Types...>::value)):_Tail(integral_constant< size_t, _Idx-1 >{}, _STD forward< _Types >(_Args)...){}_Variant_storage_(_Variant_storage_ &&)=default;_Variant_storage_(const _Variant_storage_ &)=default;_Variant_storage_ &operator=(_Variant_storage_ &&)=default;_Variant_storage_ &operator=(const _Variant_storage_ &)=default;};template< class _First, class..._Rest > class _Variant_storage_< false, _First, _Rest...>{public:static constexpr size_t _Size=1+sizeof...(_Rest);union{_Variant_item< _First > _Head;_Variant_storage< _Rest...> _Tail;};~_Variant_storage_() _NOEXCEPT{}_Variant_storage_() _NOEXCEPT{}template< class..._Types >constexpr explicit _Variant_storage_(integral_constant< size_t, 0 >, _Types &&..._Args):_Head(_STD forward< _Types >(_Args)...){}template< size_t _Idx, class..._Types >constexpr explicit _Variant_storage_(integral_constant< size_t, _Idx >, _Types &&..._Args) _NOEXCEPT_OP((is_nothrow_constructible< _Variant_storage< _Rest...>, integral_constant< size_t, _Idx-1 >, _Types...>::value)):_Tail(integral_constant< size_t, _Idx-1 >{}, _STD forward< _Types >(_Args)...){}_Variant_storage_(_Variant_storage_ &&)=default;_Variant_storage_(const _Variant_storage_ &)=default;_Variant_storage_ &operator=(_Variant_storage_ &&)=default;_Variant_storage_ &operator=(const _Variant_storage_ &)=default;};template< size_t _Idx, class _Storage, enable_if_t< _Idx==0, int >=0 > constexpr decltype(auto) _Variant_raw_get(_Storage &&_Obj){return(_STD forward< _Storage >(_Obj)._Head);}template< size_t _Idx, class _Storage, enable_if_t< _Idx!=0, int >=0 > constexpr decltype(auto) _Variant_raw_get(_Storage &&_Obj){return(_Variant_raw_get< _Idx-1 >(_STD forward< _Storage >(_Obj)._Tail));}template< class _Storage, class _Fn > 
using _Variant_visit_raw_t=decltype(_STD declval< _Fn >()(integral_constant< size_t, 0 >{}, _Variant_raw_get< 0 >(_STD declval< _Storage >())));template< class _Storage, class _Fn, size_t _Idx > constexpr _Variant_visit_raw_t< _Storage, _Fn > _Variant_visit_raw_dispatch(_Storage &&_Obj, _Fn &&_Func){return(_STD forward< _Fn >(_Func)(integral_constant< size_t, _Idx >{}, _Variant_raw_get< _Idx >(_STD forward< _Storage >(_Obj))));}template< class _Storage, class _Fn, class _Indices > struct _Variant_raw_dispatch_table_;template< class _Storage, class _Fn > using _Variant_raw_dispatch_table=_Variant_raw_dispatch_table_< _Storage, _Fn, make_index_sequence< remove_reference_t< _Storage >::_Size > >;template< class _Storage, class _Fn, size_t..._Idxs > struct _Variant_raw_dispatch_table_< _Storage, _Fn, index_sequence< _Idxs...> >{using _Dispatch_t=_Variant_visit_raw_t< _Storage, _Fn >(*)(_Storage &&, _Fn &&);static constexpr _Dispatch_t _Array[]={&_Variant_visit_raw_dispatch< _Storage, _Fn, _Idxs >...};};template< class _Storage, class _Fn, size_t..._Idxs > constexpr typename _Variant_raw_dispatch_table_< _Storage, _Fn, index_sequence< _Idxs...> >::_Dispatch_t_Variant_raw_dispatch_table_< _Storage, _Fn, index_sequence< _Idxs...> >::_Array[];template< class _Storage, class _Fn, size_t..._Idxs > _CONSTEXPR14 _Variant_visit_raw_t< _Storage, _Fn > _Variant_visit_raw1(const size_t _Idx, _Storage &&_Obj, _Fn &&_Func, index_sequence< _Idxs...>){if(_Idx >=sizeof...(_Idxs)){_THROW_NCEE(bad_variant_access, _EMPTY_ARGUMENT);}constexpr auto &_Array=_Variant_raw_dispatch_table< _Storage, _Fn >::_Array;return(_Array[_Idx](_STD forward< _Storage >(_Obj), _STD forward< _Fn >(_Func)));}template< class _Storage, class _Fn > _CONSTEXPR14 _Variant_visit_raw_t< _Storage, _Fn > _Variant_visit_raw(const size_t _Idx, _Storage &&_Obj, _Fn &&_Func){return(_Variant_visit_raw1(_Idx, _STD forward< _Storage >(_Obj), _STD forward< _Fn >(_Func), make_index_sequence< remove_reference_t< _Storage 
>::_Size >{}));}template< size_t _Count > using _Variant_index_t=conditional_t<(_Count< static_cast< size_t >((numeric_limits< signed char >::max)())), signed char, conditional_t<(_Count< static_cast< size_t >((numeric_limits< short >::max)())), short, int > >;template< class..._Types > class _Variant_base:private _Variant_storage< _Types...>{public:using _Index_t=_Variant_index_t< sizeof...(_Types)>;static constexpr auto _Invalid_index=static_cast< _Index_t >(-1);_Index_t _Which;using _Storage_t=_Variant_storage< _Types...>;_CONSTEXPR14 _Storage_t &_Storage()&_NOEXCEPT{return(*this);}constexpr const _Storage_t &_Storage() const &_NOEXCEPT{return(*this);}_CONSTEXPR14 _Storage_t &&_Storage()&&_NOEXCEPT{return(_STD move(*this));}constexpr const _Storage_t &&_Storage() const &&_NOEXCEPT{return(_STD move(*this));}_Variant_base():_Storage_t{}, _Which{_Invalid_index}{}template< size_t _Idx, class..._UTypes, enable_if_t< is_constructible< _Variant_item< _Meta_at_c< _Meta_list< _Types...>, _Idx > >, _UTypes...>::value, int >=0 >constexpr explicit _Variant_base(in_place_index_t< _Idx >, _UTypes &&..._Args):_Storage_t(integral_constant< size_t, _Idx >{}, _STD forward< _UTypes >(_Args)...), _Which{static_cast< _Index_t >(_Idx)}{}constexpr bool valueless_by_exception() const _NOEXCEPT{return(_Which< 0);}constexpr size_t index() const _NOEXCEPT{return(static_cast< size_t >(_Which));}void _Set_index(const size_t _Idx){_Which=static_cast< _Index_t >(_Idx);}void _Reset() _NOEXCEPT{if(!this->valueless_by_exception()){_Reset1(_Conjunction_t< is_trivially_destructible< _Variant_item< _Types >>...>{});this->_Set_index(variant_npos);}}private:void _Reset1(true_type) _NOEXCEPT{}void _Reset1(false_type) _NOEXCEPT{_Variant_visit_raw(index(), _Storage(), [](auto, auto &_Obj){_Destroy_in_place(_Obj);});}};template< bool _AllTriviallyDestructible, class..._Types > struct _Variant_destroy_layer_;template< class..._Types > using _Variant_destroy_layer=_Variant_destroy_layer_< conjunction< 
is_trivially_destructible< _Variant_item< _Types > >...>::value, _Types...>;template< class..._Types > struct _Variant_destroy_layer_< true, _Types...>:_Variant_base< _Types...>{using _Variant_base< _Types...>::_Variant_base;};template< class..._Types > struct _Variant_destroy_layer_< false, _Types...>:_Variant_base< _Types...>{using _Variant_base< _Types...>::_Variant_base;~_Variant_destroy_layer_() _NOEXCEPT{this->_Reset();}};template< class..._Types > struct _Variant_construct_visitor{_Variant_base< _Types...> &_Self;template< class _Idx, class _Ty >void operator()(_Idx, _Ty &&_Source) const {_Construct_in_place(_Variant_raw_get< _Idx::value >(_Self._Storage()), _STD forward< _Ty >(_Source).get());_Self._Set_index(_Idx::value);}};template< class..._Types > struct _Variant_move_assign_visitor{_Variant_base< _Types...> &_Self;template< class _Idx, class _Ty >void operator()(_Idx, _Ty &&_Source) const {_Variant_raw_get< _Idx::value >(_Self._Storage()).get()=_STD forward< _Ty >(_Source).get();}};template< class..._Types > struct _Variant_direct_copy_assign_visitor{_Variant_destroy_layer< _Types...> &_Self;template< class _Idx, class _Ty >void operator()(_Idx, const _Ty &_Source) const {_Variant_raw_get< _Idx::value >(_Self._Storage()).get()=_Source.get();}};template< class..._Types > struct _Variant_indirect_copy_assign_visitor{_Variant_destroy_layer< _Types...> &_Self;template< class _Idx, class _Ty >void operator()(_Idx, const _Ty &_Source) const {auto _Temporary=_Source;_Self._Reset();_Construct_in_place(_Variant_raw_get< _Idx::value >(_Self._Storage()), _STD move(_Temporary));_Self._Set_index(_Idx::value);}};template< bool _UseTrivialSMFs, class..._Types > struct _Variant_copymove_layer_;template< class..._Types > using _Variant_copymove_layer=_Variant_copymove_layer_< conjunction< is_trivially_copyable< _Variant_item< _Types > >..., negation< is_reference< _Types > >...>::value, _Types...>;template< class..._Types > struct _Variant_copymove_layer_< true, 
_Types...>:_Variant_destroy_layer< _Types...>{using _Variant_destroy_layer< _Types...>::_Variant_destroy_layer;};template< class..._Types > struct _Variant_copymove_layer_< false, _Types...>:_Variant_destroy_layer< _Types...>{using _Variant_destroy_layer< _Types...>::_Variant_destroy_layer;_Variant_copymove_layer_()=default;_Variant_copymove_layer_(const _Variant_copymove_layer_ &_That){if(!_That.valueless_by_exception()){_Variant_visit_raw(_That.index(), _That._Storage(), _Variant_construct_visitor< _Types...>{*this});}}_Variant_copymove_layer_(_Variant_copymove_layer_ &&_That) _NOEXCEPT_OP(conjunction< is_nothrow_move_constructible< _Types >...>::value){if(!_That.valueless_by_exception()){_Variant_visit_raw(_That.index(), _STD move(_That)._Storage(), _Variant_construct_visitor< _Types...>{*this});}}_Variant_copymove_layer_ &operator=(const _Variant_copymove_layer_ &_That){if(this->_Which==_That._Which){if(!this->valueless_by_exception()){_Variant_visit_raw(_That.index(), _That._Storage(), _Variant_direct_copy_assign_visitor< _Types...>{*this});}}else{if(_That.valueless_by_exception()){this-> _Reset()
Definition: variant:942
::Concurrency::critical_section _M_criticalSection
Definition: concrt.h:4124
::Concurrency::details::_TaskCollectionBase * _M_pTaskCollection
Definition: concrt.h:4269
Definition: concrt.h:5341
_CONCRTIMP bool _IsSynchronouslyBlocked() const
_CONCRT_BUFFER _M_writerNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3796
_RefCounterBase(long _InitialCount=1)
Definition: concrt.h:4075
_CONCRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo()
Retrieves a pointer to the internal trace flags and level information for the Concurrency runtime ETW...
_CONCRTIMP _Scoped_lock(_ReentrantPPLLock &_Lock)
_CRT_BEGIN_C_HEADER typedef void(__CRTDECL *terminate_handler)()
_Type _GetType() const
Returns the type which this location object represents.
Definition: concrt.h:2042
_In_ size_t _Out_opt_ int _In_z_ unsigned char const * _Src
Definition: mbstring.h:1039
void _Flush_current_owner()
Guarantees that if any context holds the lock at the time the method is called, that context has rele...
_ReentrantBlockingLock & _M_lock
Definition: concrt.h:803
An event type that marks the beginning of a start/end event pair.
Definition: concrt.h:5460
Definition: pplcancellation_token.h:106
_CONCRTIMP invalid_scheduler_policy_thread_specification()
Constructs an invalid_scheduler_policy_value object.
_Ty operator--(int)
Definition: concrt.h:517
unsigned int _GetId() const
Returns the ID which this location object represents.
Definition: concrt.h:2033
_TaskCollectionBase()
Definition: concrt.h:4313
An event type that represents the scheduling of a process
Definition: concrt.h:5673
char int *typedef int(__CRTDECL *_CRT_REPORT_HOOKW)(int
Definition: crtdbg.h:45
void _Unlock_writer()
Called from unlock() when a writer is holding the lock. Writer unblocks the next writer in the list a...
#define LONG_MIN
Definition: limits.h:37
_Ty operator+=(_Ty _Addend)
Definition: concrt.h:521
unsigned long _M_currentSpin
Definition: concrt.h:753
Definition: concrt.h:5634
bool _GetRuntimeOwnsLifetime() const
Definition: concrt.h:4232
TaskProc _M_completionHandler
Definition: concrt.h:5130
_TaskCollectionStatus
Definition: concrt.h:4301
_ReaderWriterLock & _M_lock
Definition: concrt.h:981
_CONCRTIMP HRESULT get_error_code() const
Returns the error code that caused the exception.
bool _PerformedPendingCancel() const
Definition: concrt.h:4405
_TaskCollectionBaseState
Definition: concrt.h:4367
_CancellationTokenState * _M_pTokenState
Definition: concrt.h:4449
::Concurrency::details::_TaskCollectionBase * _OwningCollection() const
Definition: concrt.h:4218
reader_writer_lock & operator=(const reader_writer_lock &_Lock)
Hide assignment operator for a reader_writer_lock
_Scoped_lock & operator=(const _Scoped_lock &)=delete
_CONCRTIMP operation_timed_out()
Constructs an operation_timed_out object.
unsigned __int64 DWORD_PTR
Definition: concrt.h:103
const unsigned int COOPERATIVE_TIMEOUT_INFINITE
Value indicating that a wait should never time out.
Definition: concrt.h:3478
_Chore(TaskProc _PFunction)
Definition: concrt.h:4176
unsigned long DWORD
Definition: concrt.h:63
void _SetRuntimeOwnsLifetime(bool _FValue)
Definition: concrt.h:4225
_ElemType * _M_ElemArray
Definition: concrt.h:1088
This class describes an exception thrown when the Unblock method of a Context object is called from t...
Definition: concrt.h:1455
This class describes an exception thrown because of a failure to create a worker execution context in...
Definition: concrt.h:1235
~_StackGuard()
Definition: concrt.h:5150
bool _M_fDetached
Definition: concrt.h:4279
An event type that represents the initiation of some processing
Definition: concrt.h:5655
void * _M_pBinding
Definition: concrt.h:2079
_In_reads_(_N) wchar_t const *_S2
void _EnableTrace(unsigned char _Level, unsigned long _Flags)
Definition: concrt.h:5384
An event type that represents the deletion of an object
Definition: concrt.h:5667
_CONCRTIMP ~scoped_lock()
Destroys a scoped_lock object and releases the critical section supplied in its constructor.
Task collections represent groups of work which step outside the strict structuring of the _Structure...
Definition: concrt.h:4702
unsigned int _GetBindingId() const
Gets the binding ID for this location.
Definition: concrt.h:2051
volatile long _M_completedStolenChores
Definition: concrt.h:4459
void _UnregisterConcRTEventTracing()
Unregister ConcRT as an ETW Event Provider.
An event type that represents the act of a context becoming idle.
Definition: concrt.h:5485
::Concurrency::Context * _M_pContext
Definition: concrt.h:373
unsigned int _M_type
Definition: concrt.h:2059
An abstraction of a physical location on hardware.
Definition: concrt.h:1825
static _CONCRTIMP location __cdecl current()
Returns a location object representing the most specific place the calling thread is executing...
_CONCRTIMP missing_wait()
Constructs a missing_wait object.
_CONCRTIMP invalid_scheduler_policy_value()
Constructs an invalid_scheduler_policy_value object.
_CONCRTIMP bad_target()
Constructs a bad_target object.
This class describes an exception thrown when the CurrentScheduler::Detach method is called on a cont...
Definition: concrt.h:1343
_Scoped_lock const & operator=(const _Scoped_lock &)
_CONCRTIMP const GUID SchedulerEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to s...
_CONCRTIMP size_t wait(unsigned int _Timeout=COOPERATIVE_TIMEOUT_INFINITE)
Waits for the event to become signaled.
_Scoped_lock(_ReentrantLock &_Lock)
Definition: concrt.h:841
_Scoped_lock const & operator=(const _Scoped_lock &)
_ReentrantLock & _M_lock
Definition: concrt.h:852
bool _Set_next_writer(void *_PWriter)
Called for the first context in the writer queue. It sets the queue head and it tries to claim the lo...
_CONCRTIMP invalid_multiple_scheduling()
Constructs an invalid_multiple_scheduling object.
_CONCRT_BUFFER _M_activeNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3644
_TaskCollection * _M_pNextAlias
Definition: concrt.h:5119
void _Invoke()
Definition: concrt.h:4209
_Scoped_lock const & operator=(const _Scoped_lock &)
static _Ty _Increment(volatile _Ty &_Location)
Definition: concrt.h:430
bool _Is_system() const
Internal routine that tells whether a location represents the "system location". This indicates no sp...
Definition: concrt.h:2014
An event type that represents the act of a context yielding.
Definition: concrt.h:5480
_Scoped_lock const & operator=(const _Scoped_lock &)
bool _Acquire_lock(void *_PLockingNode, bool _FHasExternalNode)
Acquires this critical section given a specific node to lock.
_CONCRTIMP const GUID AgentEventGuid
A category GUID ({B9B5B78C-0713-4898-A21A-C67949DCED07}) describing ETW events fired by the Agents li...
void _Acquire_lock(void *_PLockingNode, bool _FHasExternalNode)
Acquires a write lock given a specific write node to lock.
This class describes an exception thrown when the Concurrency Runtime detects that you neglected to c...
Definition: concrt.h:1689
_ElemType * _AddRawMallocaNode(void *_MallocaRet)
Definition: concrt.h:1124
void _Cancel()
Cancels work on the task collection.
Definition: concrt.h:5239
_In_ _Value
Definition: corecrt_wstdlib.h:65
This class describes an exception thrown when an unsupported operating system is used.
Definition: concrt.h:1264
location()
Constructs a location object.
Definition: concrt.h:1836
bool _M_fRuntimeOwnsLifetime
Definition: concrt.h:4276
_CONCRTIMP void unlock()
Unlocks the critical section.
An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a write...
Definition: concrt.h:3774
static _CONCRTIMP location __cdecl _Current_node()
Returns a location representing the scheduling node that the calling thread is executing.
reader_writer_lock & _M_reader_writer_lock
Definition: concrt.h:3828
_StackGuard()
Definition: concrt.h:5141
void * _M_ptr
Definition: concrt.h:2074
static const unsigned int timeout_infinite
Value indicating that a wait should never time out.
Definition: concrt.h:4016
volatile long _M_executionStatus
The status of the task collection.
Definition: concrt.h:5110
static void _StoreWithRelease(volatile _Ty &_Location, _Ty _Rhs)
Definition: concrt.h:406
_TaskCollectionBase * _SafeGetParent()
Definition: concrt.h:4412
_MallocaArrayHolder & operator=(const _MallocaArrayHolder &)
static void _InternalFree(_UnrealizedChore *_PChore)
Definition: pplcancellation_token.h:221
_CONCRTIMP void set()
Signals the event.
volatile unsigned char EnableLevel
Definition: concrt.h:5382
virtual ~_MallocaArrayHolder()
Definition: concrt.h:1078
volatile long _M_numberOfWriters
Definition: concrt.h:1030
bool _IsStructured()
Definition: concrt.h:4354
_CONCRTIMP const GUID ScheduleGroupEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to s...
_CONCRTIMP default_scheduler_exists()
Constructs a default_scheduler_exists object.
long __cdecl _InterlockedDecrement(long volatile *)
_TaskCollection * _M_pOriginalCollection
Definition: concrt.h:5118
::Concurrency::critical_section _M_lock
Definition: concrt.h:4025
_CONCRTIMP void lock()
Acquires this critical section.
void * _Get_reader_convoy()
Called when writers are done with the lock, or when lock was free for claiming by the first reader co...
void * _M_pResetChain
Definition: concrt.h:4024
_Type
Describes the type of the given location.
Definition: concrt.h:1940
#define _CONCRTIMP
Definition: crtdefs.h:48
unsigned int _CONCRTIMP __cdecl _GetConcurrency()
Returns the hardware concurrency available to the Concurrency Runtime, taking into account process af...
void _DoYield()
Yields its time slice using the specified yieldFunction
Definition: concrt.h:697
long _Release()
Definition: concrt.h:4053
_CONCRTIMP bool try_lock()
Attempts to acquire the reader-writer lock as a writer without blocking.
_Diff _Count
Definition: algorithm:1941
Definition: concrt.h:279
_CONCRTIMP const GUID ResourceManagerEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to t...
volatile long _M_chaining
Definition: concrt.h:5125
_Ty operator++()
Definition: concrt.h:505
static _CONCRTIMP unsigned int __cdecl _GetNumberOfVirtualProcessors()
Indicates that the location represents the "system location". This has no specific affinity...
Definition: concrt.h:1945
_CONCRTIMP _Scoped_lock(_NonReentrantPPLLock &_Lock)
An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a reade...
Definition: concrt.h:3806
void *volatile _M_pWaitChain
Definition: concrt.h:4023
static _CONCRTIMP size_t __cdecl wait_for_multiple(_In_reads_(_Count) event **_PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout=COOPERATIVE_TIMEOUT_INFINITE)
Waits for multiple events to become signaled.
void * _M_pWriterHead
Definition: concrt.h:3898
_CONCRTIMP unsigned int _Reference()
size_t & _Depth
Definition: concrt.h:5163
unsigned long _NumberOfSpins()
Determines the current spin count
Definition: concrt.h:736
_ReentrantPPLLock & _M_lock
Definition: concrt.h:4154
volatile long _M_refCount
Definition: concrt.h:4081
This class describes an exception thrown when the Scheduler::SetDefaultSchedulerPolicy method is call...
Definition: concrt.h:1396
volatile _Ty _M_value
Definition: concrt.h:485
_ElemNodeType * _M_FirstNode
Definition: concrt.h:1159
_CONCRTIMP ~reader_writer_lock()
Destroys the reader_writer_lock object.
_CONCRTIMP ~scoped_lock()
Destroys a reader_writer_lock object and releases the lock supplied in its constructor.
_CONCRTIMP unsupported_os()
Constructs an unsupported_os object.
void _Initialize(_ElemType *_Elem)
Definition: concrt.h:1051
_Chore()
Definition: concrt.h:4180
A manual reset event which is explicitly aware of the Concurrency Runtime.
Definition: concrt.h:3922
_CRT_MANAGED_FP_DEPRECATE _In_ unsigned int _Mask
Definition: float.h:235
virtual ~_UnrealizedChore()
Definition: concrt.h:4205
void _SetSpinCount(unsigned int _Count)
Set a dynamic spin count.
Definition: concrt.h:598
long __cdecl _InterlockedIncrement(long volatile *)
_CONCRTIMP improper_scheduler_attach()
Constructs an improper_scheduler_attach object.
unsigned int _M_id
Definition: concrt.h:2071
static _Ty _FetchAndAdd(volatile _Ty &_Location, _Ty _Addend)
Definition: concrt.h:425
This class describes an exception thrown when a policy key of a SchedulerPolicy object is set to an i...
Definition: concrt.h:1627
_Scoped_lock_read(_ReaderWriterLock &_Lock)
Definition: concrt.h:992
An exception safe RAII wrapper for a critical_section object.
Definition: concrt.h:3601
event & operator=(const event &_Event)
const size_t COOPERATIVE_WAIT_TIMEOUT
Value indicating that a wait timed out.
Definition: concrt.h:3469
_CONCRTIMP _SpinLock(volatile long &_Flag)
_CONCRTIMP::Concurrency::Scheduler * _GetScheduler()
Definition: concrt.h:382
bool _IsAbnormalExit() const
Definition: concrt.h:4381
bool operator==(const location &_Rhs) const
Determines whether two location objects represent the same location.
Definition: concrt.h:1907
#define SIZE_MAX
Definition: limits.h:76
char * va_list
Definition: vadefs.h:39
_CONCRTIMP critical_section()
Constructs a new critical section.
Internal maintenance structure for beacons.
Definition: concrt.h:5285
Definition: concrt.h:5636
volatile long _M_state
Definition: concrt.h:1025
_Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand)
Definition: concrt.h:497
An event type that represents the act of a context blocking.
Definition: concrt.h:5470
_CONCRTIMP message_not_found()
Constructs a message_not_found object.
void _CONCRTIMP __cdecl _UnderlyingYield()
Default method for yielding during a spin wait
unsigned int _M_bindingId
Definition: concrt.h:2065
_StructuredTaskCollection()
Construct a new structured task collection.
Definition: concrt.h:4503
scoped_lock const & operator=(const scoped_lock &)
_CONCRT_BUFFER _M_activeWriter[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:3896
long __cdecl _InterlockedCompareExchange(long volatile *, long, long)
static _CONCRTIMP void __cdecl _Oversubscribe(bool _BeginOversubscription)
_MallocaListHolder()
Definition: concrt.h:1109
static void __cdecl _InvokeBridge(void *_PContext)
Definition: concrt.h:4253
virtual void _Destroy()
Definition: concrt.h:4069
void * _M_pWriterTail
Definition: concrt.h:3899
_CONCRTIMP improper_lock()
Constructs an improper_lock exception.
_Scoped_lock_read const & operator=(const _Scoped_lock_read &)
_CONCRT_BUFFER _M_lockNode[(4 *sizeof(void *)+2 *sizeof(unsigned int)+sizeof(_CONCRT_BUFFER)-1)/sizeof(_CONCRT_BUFFER)]
Definition: concrt.h:4119
void * HANDLE
Definition: concrt.h:72
event _M_event
An event on which to wait for stolen chores to complete.
Definition: concrt.h:5116
Definition: concrt.h:5630
_CONCRTIMP ~critical_section()
Destroys a critical section.
Definition: concrt.h:547
typedef _Return_type_success_(return >=0) long HRESULT
volatile long _M_lockState
Definition: concrt.h:3900
#define NULL
Definition: corecrt.h:158
_CONCRTIMP const GUID LockEventGuid
A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to l...
volatile long _M_owner
Definition: concrt.h:860
void * _OwningContext() const
Definition: concrt.h:4336
static _ChoreType * _InternalAlloc(const _Function &_Func)
Definition: concrt.h:4239
_CONCRTIMP bool try_lock()
Tries to acquire the lock without blocking.
This class describes an exception thrown when the Context::Oversubscribe method is called with the _B...
Definition: concrt.h:1772
static _CONCRTIMP _Scheduler __cdecl _Get()
_SpinState
State of the spin wait class.
Definition: concrt.h:684
_Ty operator--()
Definition: concrt.h:513
_CONCRTIMP void lock_read()
Acquires the reader-writer lock as a reader. If there are writers, active readers have to wait until ...