Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator > Class Template Reference

#include <ppl.h>
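
This class is an internal building block of the PPL parallel loop algorithms in <ppl.h>: the loop's iteration space is split into chunks, each chunk is executed by a _Parallel_chunk_helper, and idle threads can help by stealing part of a chunk's remaining range. It is not meant to be used directly; it underlies public algorithms such as Concurrency::parallel_for. A minimal usage sketch of that public entry point (the container and loop body here are illustrative only):

#include <ppl.h>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> data(1000, 1);

    // parallel_for partitions [0, data.size()) into chunks; each chunk is executed by a
    // _Parallel_chunk_helper instance, and idle threads may steal part of its range.
    Concurrency::parallel_for(std::size_t(0), data.size(), [&data](std::size_t i)
    {
        data[i] *= 2;
    });

    return 0;
}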

Public Member Functions

 _Parallel_chunk_helper (_Index_type, const _Random_iterator &_First, _Index_type _First_iteration, _Index_type _Last_iteration, const _Index_type &_Step, const _Function &_Func, const _Partitioner &, _Worker_proxy< _Index_type > *const _Parent_data=NULL)
 
 _Parallel_chunk_helper (const _Random_iterator &_First, const _Index_type &_Step, const _Function &_Func, const _Range< _Index_type > &_Worker_range, _Worker_proxy< _Index_type > *const _Parent_data=NULL)
 
void operator() () const
 

Private Member Functions

_Parallel_chunk_helper const & operator= (_Parallel_chunk_helper const &)
 

Private Attributes

const _Random_iterator & _M_first
 
const _Index_type & _M_step
 
const _Function & _M_function
 
const _Index_type _M_first_iteration
 
const _Index_type _M_last_iteration
 
_Worker_proxy< _Index_type > *const _M_parent_worker
 

Constructor & Destructor Documentation

template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_Parallel_chunk_helper (
        _Index_type,
        const _Random_iterator & _First,
        _Index_type _First_iteration,
        _Index_type _Last_iteration,
        const _Index_type & _Step,
        const _Function & _Func,
        const _Partitioner &,
        _Worker_proxy< _Index_type > *const _Parent_data = NULL
)
inline

    :
    _M_first(_First), _M_first_iteration(_First_iteration), _M_last_iteration(_Last_iteration), _M_step(_Step), _M_function(_Func),
    _M_parent_worker(_Parent_data)
{
    // Empty constructor because members are already assigned
}
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_Parallel_chunk_helper (
        const _Random_iterator & _First,
        const _Index_type & _Step,
        const _Function & _Func,
        const _Range< _Index_type > & _Worker_range,
        _Worker_proxy< _Index_type > *const _Parent_data = NULL
)
inline

    :
    _M_first(_First), _M_first_iteration(_Worker_range._M_current_iteration), _M_last_iteration(_Worker_range._M_last_iteration), _M_step(_Step), _M_function(_Func),
    _M_parent_worker(_Parent_data)
{
    // Empty constructor because members are already assigned
}
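
This overload packages an already-computed _Range into a new helper object. It is the constructor operator() uses when it spawns a helper task that idle threads can steal; the call site, taken from the operator() listing below, is:

    task_handle<_Parallel_chunk_helper> _Helper_task(_Parallel_chunk_helper(_M_first, _M_step, _M_function, _Worker_range, &_Worker));

    _Helper_group.run(_Helper_task);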

Member Function Documentation

template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
void Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::operator() ( ) const
inline
{
    _Range<_Index_type> _Worker_range(_M_first_iteration, _M_last_iteration);

    // This class has two modes: worker and helper. The originally split chunk is always a
    // worker, while any subsequent class spawned from this class is in the helper
    // mode, which is signified using a link to the worker class through the _M_parent_worker
    // handle. So, it will wait for work to be dished out by the working class while in helper mode.
    if (_M_parent_worker != NULL && !_M_parent_worker->_Receive_range(&_Worker_range))
    {
        // If the worker class rejected the help, simply return
        return;
    }

    // Keep the secondary, scaled, loop index for quick indexing into the data structure
    _Index_type _Current_iteration = _Worker_range._M_current_iteration;
    _Index_type _Scaled_index = _Current_iteration * _M_step;

    // If there is only one iteration to be executed there is no need to initialize any
    // helper classes (work is indivisible).
    if (_Worker_range._Number_of_iterations() == 1)
    {
        // Execute one iteration
        _Parallel_chunk_helper_invoke<_Random_iterator, _Index_type, _Function, _Is_iterator>::_Invoke(_M_first, _Scaled_index, _M_function);
        return;
    }

    // If the execution reaches this point it means that this class now has a chunk of work
    // that it needs to get done, so it has transitioned into the worker mode.
    structured_task_group _Helper_group;

    // Initialize fields that are needed in the helper
    _Worker_proxy<_Index_type> _Worker(_M_parent_worker);

    // Instantiate a helper class for this working class and put it on the work queue.
    // If some thread is idle it will be able to steal the helper and help this class
    // finish its work by stealing a piece of the work range.
    task_handle<_Parallel_chunk_helper> _Helper_task(_Parallel_chunk_helper(_M_first, _M_step, _M_function, _Worker_range, &_Worker));

    _Helper_group.run(_Helper_task);

    // Owns the stack (_malloca) allocations for the additional helper task handles created
    // in the loop below (holder type assumed from the _AddRawMallocaNode/_GetAllocationSize calls).
    details::_MallocaListHolder< task_handle<_Parallel_chunk_helper> > _Holder;

    // Normally, for a cancellation semantic in cooperation with the helper, we would run_and_wait the below code on the _Helper_group. Unfortunately,
    // the capture by reference of things which must be shared (_Worker, and so forth) will cause the loop below to add additional indirection
    // instructions. The loop below *MUST* be as tight as possible with the defined semantics. Instead, we will manually notify our parent if the
    // worker's destructor runs without hitting the bottom of our chunk. This is done through notification on the beacon.

    for (_Index_type _I = _Current_iteration; _I < _Worker_range._M_last_iteration; (_I++, _Worker_range._M_current_iteration = _I, _Scaled_index += _M_step))
    {
        if (_Worker._Is_beacon_signaled())
        {
            // Either a parent task group is canceled or one of the other iterations
            // threw an exception. Abort the remaining iterations.
            //
            // Note that this could be a false positive that we must verify.
            if (_Worker._Is_done() || _Worker._Verify_beacon_cancellation())
            {
                break;
            }
        }

        if (_Worker._Is_helper_registered())
        {
            // The helper class (there can only be one) registered to help this class with the work.
            // Thus, figure out if this class needs help and split the range among the two classes.

            if (_Worker._Send_range(&_Worker_range))
            {
                // Construct every new instance of a helper class on the stack because it is beneficial to use
                // a structured task group where the class itself is responsible for the task handle's lifetime.
                task_handle<_Parallel_chunk_helper> * _Helper_subtask = _Holder._AddRawMallocaNode(_malloca(_Holder._GetAllocationSize()));

                new(_Helper_subtask) task_handle<_Parallel_chunk_helper>
                    (_Parallel_chunk_helper(_M_first, _M_step, _M_function, _Worker_range, &_Worker));

                // If _Send_range returns true, that means that there is still some non-trivial
                // work to be done, so this class will potentially need another helper.
                _Helper_group.run(*_Helper_subtask);
            }
        }

        // Allow intrusive stealing by the helper
        _Worker._Enable_intrusive_steal(&_Worker_range);

        // Execute one iteration: the element is at scaled index away from the first element.
        _Parallel_chunk_helper_invoke<_Random_iterator, _Index_type, _Function, _Is_iterator>::_Invoke(_M_first, _Scaled_index, _M_function);

        // Helper shall not steal a range after this call
        _Worker._Disable_intrusive_steal();
    }

    // Indicate that the worker is done with its iterations.
    _Worker._Set_done();

    // Wait for all worker/helper iterations to finish
    _Helper_group.wait();

    // Make sure that we've signaled that the tree is complete. This is used to detect any exception out of either _Parallel_chunk_helper_invoke or
    // _Helper_group.wait() above as a cancellation of the loop which must propagate upwards because we do not wrap the loop body in run_and_wait.
    _Worker._Set_tree_done();
}
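
The worker/helper hand-off performed above by _Receive_range, _Send_range and the intrusive-steal calls can be pictured with a small, self-contained sketch. This is not PPL code: the _Range/_Worker_proxy machinery is replaced by a hypothetical mutex-protected ToyChunk, and only the core idea is kept, namely that a helper takes the upper half of whatever range the worker has not yet executed.

#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Toy model of the worker/helper protocol: the worker walks its chunk one iteration at a
// time, and a helper that shows up takes the upper half of the pending range. The real
// code does this with _Worker_proxy::_Send_range/_Receive_range and intrusive stealing
// rather than a mutex.
struct ToyChunk
{
    std::mutex lock;
    long current = 0;   // next iteration the worker will run
    long last = 0;      // one past the final iteration of the chunk
};

// Worker: claim one iteration at a time so a helper can still split the tail.
void toy_worker(ToyChunk& chunk, std::vector<int>& data)
{
    for (;;)
    {
        long i;
        {
            std::lock_guard<std::mutex> guard(chunk.lock);
            if (chunk.current >= chunk.last)
                return;                    // chunk finished (possibly shrunk by the helper)
            i = chunk.current++;
        }
        data[i] *= 2;                      // "execute one iteration"
    }
}

// Helper: steal the upper half of the remaining range, then run it like a worker.
void toy_helper(ToyChunk& chunk, std::vector<int>& data)
{
    ToyChunk stolen;
    {
        std::lock_guard<std::mutex> guard(chunk.lock);
        long remaining = chunk.last - chunk.current;
        if (remaining < 2)
            return;                        // nothing worth stealing; decline to help
        stolen.current = chunk.current + remaining / 2;
        stolen.last = chunk.last;
        chunk.last = stolen.current;       // shrink the worker's range
    }
    toy_worker(stolen, data);
}

int main()
{
    std::vector<int> data(100, 1);
    ToyChunk chunk;
    chunk.last = static_cast<long>(data.size());

    std::thread helper(toy_helper, std::ref(chunk), std::ref(data));
    toy_worker(chunk, data);
    helper.join();

    std::printf("data[0] = %d, data[99] = %d\n", data[0], data[99]);
    return 0;
}

Because the worker claims indices one at a time under the lock and the helper only shrinks the worker's upper bound, every index is executed exactly once; the PPL version achieves the same invariant without a lock on the hot path.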
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
_Parallel_chunk_helper const& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::operator= ( _Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator > const &  )
private
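
The copy-assignment operator is declared private and, as is usual with this pre-C++11 idiom, intended to be left undefined: the class holds const and reference members (_M_first, _M_step, _M_function, _M_first_iteration, _M_last_iteration, _M_parent_worker) that cannot be reassigned, and the private declaration turns any attempted assignment into a compile- or link-time error. A minimal sketch of the idiom with a hypothetical class:

#include <iostream>

// Hypothetical illustration of the "private, undefined operator=" idiom used above.
class NonAssignable
{
public:
    explicit NonAssignable(const int& value) : value_(value) {}

    int get() const { return value_; }

private:
    // Declared but never defined: assignment from outside the class fails to compile,
    // and accidental use inside the class would fail to link. (C++11 code would write
    // "NonAssignable& operator=(const NonAssignable&) = delete;" instead.)
    NonAssignable& operator=(const NonAssignable&);

    const int& value_;   // a reference member could not be rebound by assignment anyway
};

int main()
{
    int x = 1, y = 2;
    NonAssignable a(x), b(y);
    // a = b;            // error: 'operator=' is private
    std::cout << a.get() + b.get() << "\n";
    return 0;
}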

Member Data Documentation

template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Random_iterator& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_first
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_first_iteration
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Function& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_function
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_last_iteration
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
_Worker_proxy<_Index_type>* const Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_parent_worker
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_step
private

The documentation for this class was generated from the following file:
ppl.h