Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator > Class Template Reference

#include <ppl.h>
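This is an internal helper that the PPL's parallel algorithms construct for each chunk of an index range; it is not meant to be instantiated by user code. As a point of reference, the sketch below shows the public entry points that end up driving chunked machinery of this kind. The container and lambdas are illustrative only and are not part of this header's interface.

// Minimal usage sketch (assumes MSVC with the Concurrency Runtime available).
// User code reaches chunk helpers like this one indirectly, through the public
// PPL algorithms declared in <ppl.h>.
#include <ppl.h>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> v(1000, 1);

    // parallel_for partitions the index range [0, v.size()) into chunks that are
    // executed concurrently by worker functors such as _Parallel_chunk_helper.
    concurrency::parallel_for(std::size_t(0), v.size(), [&v](std::size_t i) {
        v[i] *= 2;
    });

    // parallel_for_each performs the same kind of chunked execution over an iterator range.
    concurrency::parallel_for_each(v.begin(), v.end(), [](int& x) { x += 1; });

    return 0;
}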

Public Member Functions

 _Parallel_chunk_helper (_Index_type, const _Random_iterator &_First, _Index_type _First_iteration, _Index_type _Last_iteration, const _Index_type &_Step, const _Function &_Func, const _Partitioner &, _Worker_proxy< _Index_type > *const _Parent_data=NULL)
 
 _Parallel_chunk_helper (const _Random_iterator &_First, const _Index_type &_Step, const _Function &_Func, const _Range< _Index_type > &_Worker_range, _Worker_proxy< _Index_type > *const _Parent_data=NULL)
 
void operator() () const
 

Private Member Functions

_Parallel_chunk_helper const & operator= (_Parallel_chunk_helper const &)
 

Private Attributes

const _Random_iterator & _M_first
 
const _Index_type & _M_step
 
const _Function & _M_function
 
const _Index_type _M_first_iteration
 
const _Index_type _M_last_iteration
 
_Worker_proxy< _Index_type > *const _M_parent_worker
 

Constructor & Destructor Documentation

template<typename _Random_iterator, typename _Index_type, typename _Function, typename _Partitioner, bool _Is_iterator>
Concurrency::_Parallel_chunk_helper<_Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator>::_Parallel_chunk_helper(
        _Index_type,
        const _Random_iterator& _First,
        _Index_type _First_iteration,
        _Index_type _Last_iteration,
        const _Index_type& _Step,
        const _Function& _Func,
        const _Partitioner&,
        _Worker_proxy<_Index_type>* const _Parent_data = NULL)
inline

    : _M_first(_First), _M_first_iteration(_First_iteration), _M_last_iteration(_Last_iteration), _M_step(_Step), _M_function(_Func),
      _M_parent_worker(_Parent_data)
{
    // Empty constructor because members are already assigned
}
template<typename _Random_iterator, typename _Index_type, typename _Function, typename _Partitioner, bool _Is_iterator>
Concurrency::_Parallel_chunk_helper<_Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator>::_Parallel_chunk_helper(
        const _Random_iterator& _First,
        const _Index_type& _Step,
        const _Function& _Func,
        const _Range<_Index_type>& _Worker_range,
        _Worker_proxy<_Index_type>* const _Parent_data = NULL)
inline

    : _M_first(_First), _M_first_iteration(_Worker_range._M_current_iteration), _M_last_iteration(_Worker_range._M_last_iteration), _M_step(_Step), _M_function(_Func),
      _M_parent_worker(_Parent_data)
{
    // Empty constructor because members are already assigned
}

Member Function Documentation

template<typename _Random_iterator, typename _Index_type, typename _Function, typename _Partitioner, bool _Is_iterator>
void Concurrency::_Parallel_chunk_helper<_Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator>::operator()() const
inline

{
    _Range<_Index_type> _Worker_range(_M_first_iteration, _M_last_iteration);

    // This class has two modes: worker and helper. The originally split chunk is always a
    // worker, while any subsequent class spawned from this class is in the helper
    // mode, which is signified using a link to the worker class through _M_pOwning_worker
    // handle. So, it will wait for work to be dished out by the working class while in helper mode.
    if (_M_parent_worker != NULL && !_M_parent_worker->_Receive_range(&_Worker_range))
    {
        // If the worker class rejected the help, simply return
        return;
    }

    // Keep the secondary, scaled, loop index for quick indexing into the data structure
    _Index_type _Current_iteration = _Worker_range._M_current_iteration;
    _Index_type _Scaled_index = _Current_iteration * _M_step;

    // If there is only one iteration to be executed there is no need to initialize any
    // helper classes (work is indivisible).
    if (_Worker_range._Number_of_iterations() == 1)
    {
        // Execute one iteration
        _Parallel_chunk_helper_invoke<_Random_iterator, _Index_type, _Function, _Is_iterator>::_Invoke(_M_first, _Scaled_index, _M_function);
        return;
    }

    // If the execution reaches this point it means that this class now has a chunk of work
    // that it needs to get done, so it has transitioned into the worker mode.
    structured_task_group _Helper_group;

    // Initialize fields that are needed in the helper
    _Worker_proxy<_Index_type> _Worker(_M_parent_worker);

    // Instantiate a helper class for this working class and put it on the work queue.
    // If some thread is idle it will be able to steal the helper and help this class
    // finish its work by stealing a piece of the work range.
    task_handle<_Parallel_chunk_helper> _Helper_task(_Parallel_chunk_helper(_M_first, _M_step, _M_function, _Worker_range, &_Worker));

    _Helper_group.run(_Helper_task);

    // Stack-based holder for the task handles of any further helper subtasks spawned below.
    ::Concurrency::details::_MallocaListHolder<task_handle<_Parallel_chunk_helper>> _Holder;

    // Normally, for a cancellation semantic in cooperation with the helper, we would run_and_wait the below code on the Helper_group. Unfortunately,
    // the capture by reference of things which must be shared (_Worker, and so forth) will cause the loop below to add additional indirection
    // instructions. The loop below *MUST* be as tight as possible with the defined semantics. Instead, we will manually notify our parent if the
    // worker's destructor runs without hitting the bottom of our chunk. This is done through notification on the beacon.

    for (_Index_type _I = _Current_iteration; _I < _Worker_range._M_last_iteration; (_I++, _Worker_range._M_current_iteration = _I, _Scaled_index += _M_step))
    {
        if (_Worker._Is_beacon_signaled())
        {
            // Either a parent task group is canceled or one of the other iterations
            // threw an exception. Abort the remaining iterations
            //
            // Note that this could be a false positive that we must verify.
            if (_Worker._Is_done() || _Worker._Verify_beacon_cancellation())
            {
                break;
            }
        }

        if (_Worker._Is_helper_registered())
        {
            // The helper class (there can only be one) registered to help this class with the work.
            // Thus, figure out if this class needs help and split the range among the two classes.

            if (_Worker._Send_range(&_Worker_range))
            {
                // Construct every new instance of a helper class on the stack because it is beneficial to use
                // a structured task group where the class itself is responsible for task handle's lifetime.
                task_handle<_Parallel_chunk_helper> * _Helper_subtask = _Holder._AddRawMallocaNode(_malloca(_Holder._GetAllocationSize()));

                new(_Helper_subtask) task_handle<_Parallel_chunk_helper>
                    (_Parallel_chunk_helper(_M_first, _M_step, _M_function, _Worker_range, &_Worker));

                // If _Send_range returns true, that means that there is still some non-trivial
                // work to be done, so this class will potentially need another helper.
                _Helper_group.run(*_Helper_subtask);
            }
        }

        // Allow intrusive stealing by the helper
        _Worker._Enable_intrusive_steal(&_Worker_range);

        // Execute one iteration: the element is at scaled index away from the first element.
        _Parallel_chunk_helper_invoke<_Random_iterator, _Index_type, _Function, _Is_iterator>::_Invoke(_M_first, _Scaled_index, _M_function);

        // Helper shall not steal a range after this call
        _Worker._Disable_intrusive_steal();
    }

    // Indicate that the worker is done with its iterations.
    _Worker._Set_done();

    // Wait for all worker/helper iterations to finish
    _Helper_group.wait();

    // Make sure that we've signaled that the tree is complete. This is used to detect any exception out of either _Parallel_chunk_helper_invoke or
    // _Helper_group.wait() above as a cancellation of the loop which must propagate upwards because we do not wrap the loop body in run_and_wait.
    _Worker._Set_tree_done();
}
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
_Parallel_chunk_helper const& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::operator= ( _Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator > const &  )
private

Member Data Documentation

template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Random_iterator& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_first
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_first_iteration
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Function& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_function
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_last_iteration
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
_Worker_proxy<_Index_type>* const Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_parent_worker
private
template<typename _Random_iterator , typename _Index_type , typename _Function , typename _Partitioner , bool _Is_iterator>
const _Index_type& Concurrency::_Parallel_chunk_helper< _Random_iterator, _Index_type, _Function, _Partitioner, _Is_iterator >::_M_step
private

The documentation for this class was generated from the following file: ppl.h