xxamp.h
1 /***
2 * ==++==
3 *
4 * Copyright (c) Microsoft Corporation. All rights reserved.
5 *
6 * ==--==
7 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
8 *
9 * xxamp.h
10 *
11 * C++ AMP Library helper classes
12 *
13 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
14 ****/
15 
16 #pragma once
17 
18 #define CPP_AMP_MAX_RANK 128
19 #define _CPP_AMP_VERIFY_RANK(_Rank, _Type_name) \
20  static_assert(((_Rank) > 0) && ((_Rank) <= CPP_AMP_MAX_RANK), "The _Rank of " #_Type_name " should be greater than 0 and <= 128.")
21 
22 // This macro is used to determine whether a specified iterator is a mutable iterator
23 // by ensuring that its iterator_category trait is, or inherits from, either
24 // output_iterator_tag or forward_iterator_tag. We use a static_assert to emit a compilation
25 // error when the iterator does not meet this requirement. This macro is used for verifying that
26 // the destination iterator passed to the concurrency::copy overloads is of the right type.
27 #define _CPP_AMP_VERIFY_MUTABLE_ITERATOR(_Type_name) \
28  static_assert((std::is_base_of<std::output_iterator_tag, typename std::iterator_traits<_Type_name>::iterator_category>::value \
29  || std::is_base_of<std::forward_iterator_tag, typename std::iterator_traits<_Type_name>::iterator_category>::value), \
30  "Invalid destination argument type to concurrency::copy.")
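
// Usage sketch (illustration only, not part of the original header; the function and
// iterator names below are hypothetical): both verification macros are meant to be
// expanded inside a function template so that invalid arguments fail at compile time.
//
//   template <int _My_rank, typename _OutputIterator>
//   void _Copy_like_entry_point(_OutputIterator _Dest)
//   {
//       _CPP_AMP_VERIFY_RANK(_My_rank, array);              // rejects _My_rank <= 0 or > 128
//       _CPP_AMP_VERIFY_MUTABLE_ITERATOR(_OutputIterator);  // rejects read-only iterators
//   }
//
//   // std::vector<int>::iterator passes (its category derives from forward_iterator_tag);
//   // std::istream_iterator<int> triggers the static_assert (input_iterator_tag only).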
31 
32 #pragma pack(push,8)
33 
34 //=============================================================================
35 // Internal Intrinsic Functions used for implementing libraries
36 //=============================================================================
37 extern "C"
38 {
39 //=============================================================================
40 // Intrinsics that access textures
41 //=============================================================================
42 void __dp_read_texture(const void * /* pTex */,
43  _Out_ void * /* pRet */,
44  unsigned int /* x */,
45  unsigned int /* y */,
46  unsigned int /* z */,
47  unsigned int /* Miplevel */) __GPU_ONLY;
48 
49 void __dp_write_texture(_Out_ void * /* pTex */,
50  const void * /* pValue */,
51  unsigned int /* x */,
52  unsigned int /* y */,
53  unsigned int /* z */) __GPU_ONLY;
54 
55 void __dp_sample_texture(const void * /* pTex */,
56  const void * /* pSampler */,
57  _Out_ void* /* pRet */,
58  float /* x */,
59  float /* y */,
60  float /* z */,
61  unsigned int /* kind: 0 - GatherRed, 1 - GatherGreen, 2 - GatherBlue, 3 - GatherAlpha, 4 - Sample */,
62  float /* LevelOfDetail */) __GPU_ONLY;
63 
64 void __dp_sample_texture_predefined(const void * /* pTex */,
65  _Out_ void * /* pRet */,
66  float /* x */,
67  float /* y */,
68  float /* z */,
69  unsigned int /* predefinedSamplerId: filter_mode << 16 | address_mode */,
70  unsigned int /* kind: 0 - GatherRed, 1 - GatherGreen, 2 - GatherBlue, 3 - GatherAlpha, 4 - Sample */,
71  float /* LevelOfDetail */) __GPU_ONLY;
72 }
73 
74 namespace Concurrency
75 {
76  // forward decls
77  template <typename _Value_type, int _Rank>
78  class array;
79 
80  template <typename _Value_type, int _Rank>
81  class array_view;
82 
83  template <int _Dim0, int _Dim1 = 0, int _Dim2 = 0>
84  class tiled_extent;
85 
86  template <int _Rank>
87  class extent;
88 
89  template <int _Rank>
90  class index;
91 
92 namespace details
93 {
94  // forward decls
95  template <int _Rank, int _Element_size>
96  class _Array_view_shape;
97 
98  template <int _Rank, int _Element_size>
99  class _Array_view_base;
100 
101  // Helper class to avoid static_assert errors on uninstantiated
102  // templates.
103  template <typename _T>
104  struct _Falsifier
105  {
106  static const bool value = false;
107  };
108 
109  // Helper classes for array and array_view projection
110  template <typename _T, int _R>
111  struct _Projection_result_type
112  {
113  static_assert(!std::is_const<_T>::value, "const _T is not supported");
114  static_assert(_R >= 2, "Rank must be greater than or equal to 2");
115 
116  public:
117  typedef array_view<const _T,_R-1> _Const_result_type;
118  typedef array_view<_T,_R-1> _Result_type;
119  };
120 
121  template <typename _T>
122  struct _Projection_result_type<_T, 1>
123  {
124  static_assert(!std::is_const<_T>::value, "const _T is not supported");
125 
126  public:
127  typedef const _T& _Const_result_type;
128  typedef _T& _Result_type;
129  };
130 
131  template <typename _T, int _R>
132  struct _Const_array_view_projection_helper
133  {
134  static_assert(!std::is_const<_T>::value, "const _T is not supported");
135  static_assert(_R >= 2, "Rank must be greater than or equal to 2");
136 
137  public:
138  static typename _Projection_result_type<_T,_R>::_Const_result_type _Project0(const array_view<const _T,_R>* _Arr_view, int _I) __GPU;
139  };
140 
141  template <typename _T>
142  struct _Const_array_view_projection_helper<_T, 1>
143  {
144  static_assert(!std::is_const<_T>::value, "const _T is not supported");
145 
146  public:
147  static typename _Projection_result_type<_T,1>::_Const_result_type _Project0(const array_view<const _T,1>* _Arr_view, int _I) __GPU;
148  };
149 
150  template <typename _T, int _R>
151  struct _Array_view_projection_helper
152  {
153  static_assert(!std::is_const<_T>::value, "const _T is not supported");
154  static_assert(_R >= 2, "Rank must be greater than or equal to 2");
155 
156  public:
157  static typename _Projection_result_type<_T,_R>::_Result_type _Project0(const array_view<_T,_R>* _Arr_view, int _I) __GPU;
158  };
159 
160  template <typename _T>
161  struct _Array_view_projection_helper<_T, 1>
162  {
163  static_assert(!std::is_const<_T>::value, "const _T is not supported");
164 
165  public:
166  static typename _Projection_result_type<_T,1>::_Result_type _Project0(const array_view<_T,1>* _Arr_view, int _I) __GPU;
167  };
168 
169  template <typename _T, int _R>
170  struct _Const_array_projection_helper
171  {
172  static_assert(!std::is_const<_T>::value, "const _T is not supported");
173  static_assert(_R >= 2, "Rank must be greater than or equal to 2");
174 
175  public:
176  static typename _Projection_result_type<_T,_R>::_Const_result_type _Project0(const array<_T,_R>* _Array, int _I) __GPU;
177  };
178 
179  template <typename _T>
180  struct _Const_array_projection_helper<_T, 1>
181  {
182  static_assert(!std::is_const<_T>::value, "const _T is not supported");
183 
184  public:
185  static typename _Projection_result_type<_T,1>::_Const_result_type _Project0(const array<_T,1>* _Array, int _I) __GPU;
186  };
187 
188  template <typename _T, int _R>
189  struct _Array_projection_helper
190  {
191  static_assert(!std::is_const<_T>::value, "const _T is not supported");
192  static_assert(_R >= 2, "Rank must be greater than or equal to 2");
193 
194  public:
195  static typename _Projection_result_type<_T,_R>::_Result_type _Project0(_In_ array<_T,_R>* _Array, int _I) __GPU;
196  };
197 
198  template <typename _T>
199  struct _Array_projection_helper<_T, 1>
200  {
201  static_assert(!std::is_const<_T>::value, "const _T is not supported");
202 
203  public:
204  static typename _Projection_result_type<_T,1>::_Result_type _Project0(_In_ array<_T,1>* _Array, int _I) __GPU;
205  };
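
// Illustrative sketch (assumption, not in the original header): what the projection
// helpers above are used for. Projecting the most significant dimension of a rank-R
// array_view yields a rank R-1 view; for rank 1 the projection degenerates to an
// element reference, as captured by _Projection_result_type.
//
//   concurrency::array_view<float, 2> _View2(4, 8, _Data);  // _Data is a hypothetical container
//   concurrency::array_view<float, 1> _Row = _View2[1];     // _Projection_result_type<float, 2>::_Result_type
//   float& _Elem = _Row[3];                                  // _Projection_result_type<float, 1>::_Result_type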
206 
207  // leading underscore implies 'private'
208  enum _eInitializeState {
209  _do_not_initialize
210  };
211 
212  // Helpers to create uninitialized tuples
213  template<class _Tuple_type>
214  _Tuple_type _Create_uninitialized_tuple() __GPU
215  {
216  return _Tuple_type(details::_do_not_initialize);
217  };
218 
219 
220  // ========================================================
221  // Helpers to unroll loops for low ranks
222  // ========================================================
223 
224  // operation kinds
225  enum _op_kind
226  {
227  // cmp op
228  opEq, // a == b
229  opNeq, // a != b
230  // not op
231  opNot, // !a
232  // compound assignment
233  opAssign, // a = b
234  opAddEq, // a += b;
235  opSubEq, // a -= b;
236  opMulEq, // a *= b
237  opDivEq, // a /= b
238  opModEq, // a %= b
239  // arithmetic ops
240  opAdd, // c = a + b
241  opSub, // c = a - b
242  opMul, // c = a * b
243  opDiv, // c = a / b
244  opMod, // c = a % b
245  };
246 
247  const static int LOOP_UNROLL_THRESHOLD = 4;
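
// Illustrative sketch (assumption): how the op kinds above combine with
// LOOP_UNROLL_THRESHOLD in the loop helpers defined below. For a rank-3 index-like
// type, _Rank <= LOOP_UNROLL_THRESHOLD selects the recursively unrolled
// specialization; above the threshold the plain for-loop specialization is chosen.
//
//   concurrency::index<3> _A, _B;
//   bool _Same = _cmp_op_loop_helper<concurrency::index<3>, opEq>::func(_A, _B);    // unrolled element comparisons
//   _compound_assign_op_loop_helper<concurrency::index<3>, opAddEq>::func(_A, _B);  // unrolled element-wise +=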
248 
249  // forward declarations
250  template<typename _T, _op_kind _Kind, int _Rank = _T::rank, bool _Unroll = (_Rank > 1 && _Rank <= LOOP_UNROLL_THRESHOLD)>
251  struct _cmp_op_loop_helper;
252 
253  template<typename _T, _op_kind _Kind, int _Rank = _T::rank, bool _Unroll = (_Rank > 1 && _Rank <= LOOP_UNROLL_THRESHOLD)>
254  struct _compound_assign_op_loop_helper;
255 
256  template<typename _T, _op_kind _Kind, int _Rank = _T::rank, bool _Unroll = (_Rank > 1 && _Rank <= LOOP_UNROLL_THRESHOLD)>
257  struct _arithmetic_op_loop_helper;
258 
259  template<typename _T, int _Rank = _T::rank, bool _Unroll = (_Rank > 1 && _Rank <= LOOP_UNROLL_THRESHOLD)>
260  struct _product_helper;
261 
262  // comparison operations
263  template<typename _T, _op_kind _Kind>
264  struct _cmp_op_helper;
265 
266  template<typename _T>
267  struct _cmp_op_helper<_T, opEq>
268  {
269  static bool func(const _T & a, const _T & b) __GPU { return a == b; }
270  };
271 
272  template<typename _T>
273  struct _cmp_op_helper<_T, opNeq>
274  {
275  static bool func(const _T & a, const _T & b) __GPU { return a != b; }
276  };
277 
278  template<typename _T, _op_kind _Kind>
279  struct _cmp_op_helper
280  {
281  static bool func(const _T & a, const _T & b) __GPU { static_assert(_Falsifier<_T>::value, "invalid op"); return false;}
282  };
283 
284  // operations: a op= b
285  template<typename _T, _op_kind _Kind>
286  struct _compound_assign_op_helper;
287 
288  template<typename _T>
289  struct _compound_assign_op_helper<_T, opAssign>
290  {
291  static void func(_T & a, const _T & b) __GPU { a = b; }
292  };
293 
294  template<typename _T>
295  struct _compound_assign_op_helper<_T, opAddEq>
296  {
297  static void func(_T & a, const _T & b) __GPU { a += b; }
298  };
299 
300  template<typename _T>
301  struct _compound_assign_op_helper<_T, opSubEq>
302  {
303  static void func(_T & a, const _T & b) __GPU { a -= b; }
304  };
305 
306  template<typename _T>
307  struct _compound_assign_op_helper<_T, opMulEq>
308  {
309  static void func(_T & a, const _T & b) __GPU { a *= b; }
310  };
311 
312  template<typename _T>
313  struct _compound_assign_op_helper<_T, opDivEq>
314  {
315  static void func(_T & a, const _T & b) __GPU { a /= b; }
316  };
317 
318  template<typename _T>
319  struct _compound_assign_op_helper<_T, opModEq>
320  {
321  static void func(_T & a, const _T & b) __GPU { a %= b; }
322  };
323 
324  template<typename _T, _op_kind _Kind>
325  struct _compound_assign_op_helper
326  {
327  static void func(_T & a, const _T & b) __GPU { static_assert(_Falsifier<_T>::value, "invalid op"); }
328  };
329 
330  // operations: a = b + c
331  template<typename _T, _op_kind _Kind>
332  struct _arithmetic_op_helper;
333 
334  template<typename _T>
335  struct _arithmetic_op_helper<_T, opAdd>
336  {
337  static void func(_T & a, const _T & b, const _T & c) __GPU { a = b + c; }
338  };
339 
340  template<typename _T>
341  struct _arithmetic_op_helper<_T, opSub>
342  {
343  static void func(_T & a, const _T & b, const _T & c) __GPU { a = b - c; }
344  };
345 
346  template<typename _T>
347  struct _arithmetic_op_helper<_T, opMul>
348  {
349  static void func(_T & a, const _T & b, const _T & c) __GPU { a = b * c; }
350  };
351 
352  template<typename _T>
353  struct _arithmetic_op_helper<_T, opDiv>
354  {
355  static void func(_T & a, const _T & b, const _T & c) __GPU { a = b / c; }
356  };
357 
358  template<typename _T>
359  struct _arithmetic_op_helper<_T, opMod>
360  {
361  static void func(_T & a, const _T & b, const _T & c) __GPU { a = b % c; }
362  };
363 
364  template<typename _T, _op_kind _Kind>
365  struct _arithmetic_op_helper
366  {
367  static void func(_T & a, const _T & b, const _T & c) __GPU { static_assert(_Falsifier<_T>::value, "invalid op"); }
368  };
369 
370 
371 #pragma warning( push )
372 #pragma warning( disable : 4100 ) // unreferenced formal parameter
373  template<typename _T1>
374  struct _index_helper
375  {
376  template<typename _T2>
377  static typename _T1::value_type func(const _T2 & a, int i) __GPU
378  {
379  return a[i];
380  }
381 
382  static typename _T1::value_type func(typename _T1::value_type a[_T1::rank], int i) __GPU
383  {
384  return a[i];
385  }
386 
387  static typename _T1::value_type func(typename _T1::value_type a, int i) __GPU
388  {
389  return a;
390  }
391  };
392 #pragma warning( pop )
393 
394  // Loop unrolling helpers: unroll the loop when rank <= LOOP_UNROLL_THRESHOLD
395 
396 
397  // helper class to unroll the loop that uses cmp ops
398 
399  // a[i] op b[i]
400  template<typename _T, _op_kind _Kind>
401  struct _cmp_op_loop_helper<_T, _Kind, 1, false /* _Unroll */ >
402  {
403  static bool func(const _T& a, const _T& b) __GPU
404  {
405  return _cmp_op_helper<typename _T::value_type, _Kind>::func(a[0], b[0]);
406  }
407  };
408 
409  template<typename _T, _op_kind _Kind, int _Rank>
410  struct _cmp_op_loop_helper<_T, _Kind, _Rank, true /* _Unroll */ >
411  {
412  static bool func(const _T& a, const _T& b) __GPU
413  {
414  if (!_cmp_op_helper<typename _T::value_type, _Kind>::func(a[_Rank - 1], b[_Rank - 1]))
415  return false;
416  else
417  return _cmp_op_loop_helper<_T, _Kind, _Rank - 1>::func(a, b);
418  }
419  };
420 
421  template<typename _T, _op_kind _Kind, int _Rank>
422  struct _cmp_op_loop_helper<_T, _Kind, _Rank, false /* _Unroll */ >
423  {
424  static bool func(const _T& a, const _T& b) __GPU
425  {
426  for (int i = 0; i < _Rank; i++)
427  {
428  if (!_cmp_op_helper<typename _T::value_type, _Kind>::func(a[i], b[i]))
429  return false;
430  }
431  return true;
432  }
433  };
434 
435  // helper class for loop that uses a op= b
436 
437  template<typename _T, _op_kind _Kind>
438  struct _compound_assign_op_loop_helper<_T, _Kind, 1, false /* _Unroll */>
439  {
440  template<typename _T2>
441  static void func(_T& a, const _T2& b) __GPU
442  {
443  _compound_assign_op_helper<typename _T::value_type, _Kind>::func(a[0], b[0]);
444  }
445  };
446 
447  template<typename _T, _op_kind _Kind, int _Rank>
448  struct _compound_assign_op_loop_helper<_T, _Kind, _Rank, true /* _Unroll */>
449  {
450  template<typename _T2>
451  static void func(_T& a, const _T2& b) __GPU
452  {
453  _compound_assign_op_loop_helper<_T, _Kind, _Rank - 1>::func(a, b);
454  _compound_assign_op_helper<typename _T::value_type, _Kind>::func(a[_Rank - 1], b[_Rank - 1]);
455  }
456  };
457 
458  template<_op_kind _Kind, typename _T, int _Rank>
459  struct _compound_assign_op_loop_helper<_T, _Kind, _Rank, false /* _Unroll */>
460  {
461  template<typename _T2>
462  static void func(_T& a, const _T2& b) __GPU
463  {
464  for (int i = 0; i < _Rank; i++)
465  {
466  _compound_assign_op_helper<typename _T::value_type, _Kind>::func(a[i], b[i]);
467  }
468  }
469  };
470 
471  // specialization of _compound_assign_op_loop_helper for opAssign
472  template<typename _T>
473  struct _compound_assign_op_loop_helper<_T, opAssign, 1, false /* _Unroll */>
474  {
475  template<typename _T2>
476  static void func(_T& a, const _T2& b) __GPU
477  {
478  a[0] = b[0];
479  }
480 
481  static void func(_T& a, typename _T::value_type b) __GPU
482  {
483  a[0] = b;
484  }
485  };
486 
487  template<typename _T>
488  struct _compound_assign_op_loop_helper<_T, opAssign, 2, true /* _Unroll */>
489  {
490  template<typename _T2>
491  static void func(_T& a, const _T2& b) __GPU
492  {
493  a[0] = b[0];
494  a[1] = b[1];
495  }
496 
497  static void func(_T& a, typename _T::value_type b) __GPU
498  {
499  a[0] = b;
500  a[1] = b;
501  }
502  };
503 
504  template<typename _T>
505  struct _compound_assign_op_loop_helper<_T, opAssign, 3, true /* _Unroll */>
506  {
507  template<typename _T2>
508  static void func(_T& a, const _T2& b) __GPU
509  {
510  a[0] = b[0];
511  a[1] = b[1];
512  a[2] = b[2];
513  }
514 
515  static void func(_T& a, typename _T::value_type b) __GPU
516  {
517  a[0] = b;
518  a[1] = b;
519  a[2] = b;
520  }
521  };
522 
523  template<typename _T>
524  struct _compound_assign_op_loop_helper<_T, opAssign, 4, true /* _Unroll */>
525  {
526  template<typename _T2>
527  static void func(_T& a, const _T2& b) __GPU
528  {
529  a[0] = b[0];
530  a[1] = b[1];
531  a[2] = b[2];
532  a[3] = b[3];
533  }
534 
535  static void func(_T& a, typename _T::value_type b) __GPU
536  {
537  a[0] = b;
538  a[1] = b;
539  a[2] = b;
540  a[3] = b;
541  }
542  };
543 
544  // helper class for loop that uses a = b op c
545 
546  template<typename _T, _op_kind _Kind>
547  struct _arithmetic_op_loop_helper<_T, _Kind, 1, false /* _Unroll */>
548  {
549  template<typename _T1, typename _T2>
550  static void func(_T& a, const _T1& b, const _T2& c) __GPU
551  {
552  _arithmetic_op_helper<typename _T::value_type, _Kind>::func(a[0],
553  _index_helper<_T>::func(b, 0),
554  _index_helper<_T>::func(c, 0));
555  }
556  };
557 
558  template<typename _T, _op_kind _Kind, int _Rank>
559  struct _arithmetic_op_loop_helper<_T, _Kind, _Rank, true /* _Unroll */>
560  {
561  template<typename _T1, typename _T2>
562  static void func(_T& a, const _T1& b, const _T2& c) __GPU
563  {
564  _arithmetic_op_loop_helper<_T, _Kind, _Rank - 1>::func(a, b, c);
565  _arithmetic_op_helper<typename _T::value_type, _Kind>::func(a[_Rank - 1],
566  _index_helper<_T>::func(b, _Rank - 1),
567  _index_helper<_T>::func(c, _Rank - 1));
568  }
569  };
570 
571  template<typename _T, _op_kind _Kind, int _Rank>
572  struct _arithmetic_op_loop_helper<_T, _Kind, _Rank, false /* _Unroll */>
573  {
574  template<typename _T1, typename _T2>
575  static void func(_T& a, const _T1& b, const _T2& c) __GPU
576  {
577  for (int i = 0; i < _Rank; i++)
578  {
579  _arithmetic_op_helper<typename _T::value_type, _Kind>::func(a[i],
580  _index_helper<_T>::func(b, i),
581  _index_helper<_T>::func(c, i));
582  }
583  }
584  };
585 
586  // Helper to unroll the loop for the product operation
587 
588  template<typename _T>
589  struct _product_helper<_T, 1, false /* _Unroll */>
590  {
591  template<typename _T1>
592  static typename _T::value_type func(const _T1 & a) __GPU
593  {
594  return a[0];
595  }
596  };
597 
598  template<typename _T, int _Rank>
599  struct _product_helper<_T, _Rank, true /* _Unroll */>
600  {
601  template<typename _T1>
602  static typename _T::value_type func(const _T1 & a) __GPU
603  {
604  return _product_helper<_T, _Rank - 1>::func(a) * a[_Rank - 1];
605  }
606  };
607 
608  template<typename _T, int _Rank>
609  struct _product_helper<_T, _Rank, false /* _Unroll */>
610  {
611  template<typename _T1>
612  static typename _T::value_type func(const _T1 & a) __GPU
613  {
614  typename _T::value_type _e = a[0];
615  for (int i = 1; i < _Rank; i++)
616  {
617  _e *= a[i];
618  }
619  return _e;
620  }
621  };
622 
623  template<typename _T1, int _Rank = _T1::rank>
624  struct _map_index;
625 
626 #pragma warning( push )
627 #pragma warning( disable : 4100 ) // unreferenced formal parameter
628  template<typename _T1>
629  struct _map_index<_T1, 1>
630  {
631  template<typename _T2>
632  static _T1 func(int _Flat_index, const _T2 _Base) __GPU
633  {
634  _T1 _index = _Create_uninitialized_tuple<_T1>();
635  _index[0] = _Flat_index;
636  return _index;
637  }
638  };
639 
640  template<typename _T1>
641  struct _map_index<_T1, 2>
642  {
643  template<typename _T2>
644  static _T1 func(int _Flat_index, const _T2 _Base) __GPU
645  {
646  _T1 _index = _Create_uninitialized_tuple<_T1>();
647  _index[1] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[1]);
648  _index[0] = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[1]);
649  return _index;
650  }
651  };
652 
653  template<typename _T1>
654  struct _map_index<_T1, 3>
655  {
656  template<typename _T2>
657  static _T1 func(int _Flat_index, const _T2 _Base) __GPU
658  {
659  _T1 _index = _Create_uninitialized_tuple<_T1>();
660 
661  _index[2] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[2]);
662  _Flat_index = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[2]);
663  _index[1] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[1]);
664  _index[0] = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[1]);
665  return _index;
666  }
667  };
668 
669  template<typename _T1>
670  struct _map_index<_T1, 4>
671  {
672  template<typename _T2>
673  static _T1 func(int _Flat_index, const _T2 _Base) __GPU
674  {
675  _T1 _index = _Create_uninitialized_tuple<_T1>();
676  _index[3] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[3]);
677  _Flat_index = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[3]);
678  _index[2] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[2]);
679  _Flat_index = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[2]);
680  _index[1] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[1]);
681  _index[0] = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[1]);
682  return _index;
683  }
684  };
685 
686  template<typename _T1, int _Rank>
687  struct _map_index
688  {
689  template<typename _T2>
690  static _T1 func(int _Flat_index, const _T2 _Base) __GPU
691  {
692  _T1 _index = _Create_uninitialized_tuple<_T1>();
693  for (int i = _Rank - 1; i > 0; --i)
694  {
695  _index[i] = static_cast<unsigned int>(_Flat_index) % static_cast<unsigned int>(_Base[i]);
696  _Flat_index = static_cast<unsigned int>(_Flat_index) / static_cast<unsigned int>(_Base[i]);
697  }
698  _index[0] = _Flat_index;
699  return _index;
700  }
701  };
702 #pragma warning( pop )
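
// Worked example (illustration only): _map_index converts a flat row-major offset back
// into a rank-N index by repeated division with the extent of each less significant
// dimension. With _Base = extent<2>(2, 3) and _Flat_index = 5:
//
//   _index[1] = 5 % 3 = 2;   // column
//   _index[0] = 5 / 3 = 1;   // row
//
// so flat offset 5 maps to index<2>(1, 2), the last element of a 2 x 3 extent.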
703 
704  // Helper class for unrolling extent<N>::contains
705 
706 #pragma warning( push )
707 #pragma warning( disable : 4100 ) // unreferenced formal parameter
708 
709  template<typename _EXT, typename _IDX, int _R>
710  struct _contains
711  {
712  static bool func(const _EXT& _Ext, const _IDX& _Idx) __GPU
713  {
714  for (int _I=0; _I<_R; _I++)
715  {
716  if ((_Idx[_I] < 0) | (_Idx[_I] >= _Ext[_I]))
717  return false;
718  }
719  return true;
720  }
721  };
722 
723  template<typename _EXT, typename _IDX>
724  struct _contains<_EXT,_IDX,1>
725  {
726  static bool func(const _EXT& _Ext, const _IDX& _Idx) __GPU
727  {
728  return (_Idx[0] >= 0) & (_Idx[0] < _Ext[0]);
729  }
730  };
731 
732  template<typename _EXT, typename _IDX>
733  struct _contains<_EXT,_IDX,2>
734  {
735  static bool func(const _EXT& _Ext, const _IDX& _Idx) __GPU
736  {
737  return (_Idx[0] >= 0) & (_Idx[0] < _Ext[0]) &
738  (_Idx[1] >= 0) & (_Idx[1] < _Ext[1]);
739  }
740  };
741 
742  template<typename _EXT, typename _IDX>
743  struct _contains<_EXT,_IDX,3>
744  {
745  static bool func(const _EXT& _Ext, const _IDX& _Idx) __GPU
746  {
747  return (_Idx[0] >= 0) & (_Idx[0] < _Ext[0]) &
748  (_Idx[1] >= 0) & (_Idx[1] < _Ext[1]) &
749  (_Idx[2] >= 0) & (_Idx[2] < _Ext[2]);
750  }
751  };
752 
753 #pragma warning( pop )
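
// Illustrative sketch (assumption): extent<N>::contains is expected to forward to the
// _contains helper above, so ranks 1 through 3 get fully unrolled bounds checks while
// higher ranks fall back to the loop.
//
//   concurrency::extent<2> _Ext(4, 8);
//   concurrency::index<2>  _Idx(3, 7);
//   bool _Inside = _contains<concurrency::extent<2>, concurrency::index<2>, 2>::func(_Ext, _Idx);  // true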
754 
755  // Helper class for loop unrolling of array projection
756 
757 #pragma warning( push )
758 #pragma warning( disable : 4100 ) // unreferenced formal parameter
759 
760  template<typename _RES_EXT, typename _SRC_EXT, typename _RES_IDX, typename _SRC_IDX, int _R>
761  struct _project0
762  {
763  static_assert(_RES_EXT::rank == _R-1, "Result extent rank must be _R-1");
764  static_assert(_SRC_EXT::rank == _R, "Source extent rank must be _R");
765  static_assert(_RES_IDX::rank == _R-1, "Result index rank must be _R-1");
766  static_assert(_SRC_IDX::rank == _R, "Source index rank must be _R");
767 
768  static void func(_RES_EXT& _ResArrayExtent, const _SRC_EXT& _SrcArrayExtent,
769  _RES_EXT& _ResArrayMultiplier, const _SRC_EXT& _SrcArrayMultiplier,
770  _RES_IDX& _ResViewOffset, const _SRC_IDX& _SrcViewOffset,
771  _RES_EXT& _ResViewExtent, const _SRC_EXT& _SrcViewExtent) __GPU
772  {
773  for (int _I=0; _I<=_R-3; _I++)
774  {
775  _ResArrayExtent [_I] = _SrcArrayExtent [_I+1];
776  _ResArrayMultiplier[_I] = _SrcArrayMultiplier[_I+1];
777  _ResViewOffset [_I] = _SrcViewOffset [_I+1];
778  _ResViewExtent [_I] = _SrcViewExtent [_I+1];
779  }
780 
781  _ResArrayExtent [_R-2] = _SrcArrayExtent [_R-1];
782  _ResViewOffset [_R-2] = _SrcViewOffset [_R-1];
783  _ResViewExtent [_R-2] = _SrcViewExtent [_R-1];
784  }
785 
786  };
787 
788  template<typename _RES_EXT, typename _SRC_EXT, typename _RES_IDX, typename _SRC_IDX>
789  struct _project0<_RES_EXT,_SRC_EXT,_RES_IDX,_SRC_IDX,2>
790  {
791  static void func(_RES_EXT& _ResArrayExtent, const _SRC_EXT& _SrcArrayExtent,
792  _RES_EXT& /*_ResArrayMultiplier*/, const _SRC_EXT& /*_SrcArrayMultiplier*/,
793  _RES_IDX& _ResViewOffset, const _SRC_IDX& _SrcViewOffset,
794  _RES_EXT& _ResViewExtent, const _SRC_EXT& _SrcViewExtent) __GPU
795  {
796  _ResArrayExtent[0] = _SrcArrayExtent[1];
797  _ResViewOffset [0] = _SrcViewOffset [1];
798  _ResViewExtent [0] = _SrcViewExtent [1];
799  }
800  };
801 
802  template<typename _RES_EXT, typename _SRC_EXT, typename _RES_IDX, typename _SRC_IDX>
803  struct _project0<_RES_EXT,_SRC_EXT,_RES_IDX,_SRC_IDX,3>
804  {
805  static void func(_RES_EXT& _ResArrayExtent, const _SRC_EXT& _SrcArrayExtent,
806  _RES_EXT& _ResArrayMultiplier, const _SRC_EXT& _SrcArrayMultiplier,
807  _RES_IDX& _ResViewOffset, const _SRC_IDX& _SrcViewOffset,
808  _RES_EXT& _ResViewExtent, const _SRC_EXT& _SrcViewExtent) __GPU
809  {
810  _ResArrayExtent [0] = _SrcArrayExtent [1];
811  _ResArrayMultiplier[0] = _SrcArrayMultiplier[1];
812  _ResViewOffset [0] = _SrcViewOffset [1];
813  _ResViewExtent [0] = _SrcViewExtent [1];
814 
815  _ResArrayExtent [1] = _SrcArrayExtent [2];
816  _ResViewOffset [1] = _SrcViewOffset [2];
817  _ResViewExtent [1] = _SrcViewExtent [2];
818  }
819 
820  };
821 
822 #pragma warning( pop )
823 
824 
825  // helper class for loop unrolling.
826  template<typename _T1, typename _T2, int _Rank = _T2::rank>
827  struct _Array_init_helper;
828 
829 #pragma warning( push )
830 #pragma warning( disable : 4100 ) // unreferenced formal parameter
831  template<typename _T1, typename _T2>
832  struct _Array_init_helper<_T1, _T2, 1>
833  {
834  static void func(unsigned int & _Total_extent, _T1 & _Multiplier, const _T2 & _Extent) __GPU
835  {
836  return;
837  }
838  };
839 #pragma warning( pop )
840 
841  template<typename _T1, typename _T2>
842  struct _Array_init_helper<_T1, _T2, 2>
843  {
844  static void func(unsigned int & _Total_extent, _T1 & _Multiplier, const _T2 & _Extent) __GPU
845  {
846  _Multiplier[0] = _Total_extent;
847  _Total_extent *= _Extent[0];
848  }
849  };
850 
851  template<typename _T1, typename _T2>
852  struct _Array_init_helper<_T1, _T2, 3>
853  {
854  static void func(unsigned int & _Total_extent, _T1 & _Multiplier, const _T2 & _Extent) __GPU
855  {
856  _Multiplier[1] = _Total_extent;
857  _Total_extent *= _Extent[1];
858  _Multiplier[0] = _Total_extent;
859  _Total_extent *= _Extent[0];
860  }
861  };
862 
863  template<typename _T1, typename _T2>
864  struct _Array_init_helper<_T1, _T2, 4>
865  {
866  static void func(unsigned int & _Total_extent, _T1 & _Multiplier, const _T2 & _Extent) __GPU
867  {
868  _Multiplier[2] = _Total_extent;
869  _Total_extent *= _Extent[2];
870  _Multiplier[1] = _Total_extent;
871  _Total_extent *= _Extent[1];
872  _Multiplier[0] = _Total_extent;
873  _Total_extent *= _Extent[0];
874  }
875  };
876 
877  template<typename _T1, typename _T2, int _Rank>
878  struct _Array_init_helper
879  {
880  static void func(unsigned int & _Total_extent, _T1 & _Multiplier, const _T2 & _Extent) __GPU
881  {
882  _Multiplier[_Rank-2] = _Total_extent;
883  for (int i = _Rank-2; i >= 1; --i) {
884  _Total_extent *= _Extent[i];
885  _Multiplier[i-1] = _Total_extent;
886  }
887  _Total_extent *= _Extent[0];
888  }
889  };
890 
891  template<int _Rank, typename _T1, typename _T2>
892  struct _Array_flatten_helper;
893 
894  template<typename _T1, typename _T2>
895  struct _Array_flatten_helper<1, _T1, _T2>
896  {
897  static _T2 func(const _T1 * /*_Multiplier*/, const _T2 *_Index) __GPU
898  {
899  return _Index[0];
900  }
901  };
902 
903  template<typename _T1, typename _T2>
904  struct _Array_flatten_helper<2, _T1, _T2>
905  {
906  static _T2 func(const _T1 *_Multiplier, const _T2 *_Index) __GPU
907  {
908  return ((_Multiplier[0] * _Index[0]) + _Index[1]);
909  }
910  };
911 
912  template<typename _T1, typename _T2>
913  struct _Array_flatten_helper<3, _T1, _T2>
914  {
915  static _T2 func(const _T1 *_Multiplier, const _T2 *_Index) __GPU
916  {
917  return ((_Multiplier[0] * _Index[0]) + (_Multiplier[1] * _Index[1]) + _Index[2]);
918  }
919  };
920 
921  template<int _Rank, typename _T1, typename _T2>
922  struct _Array_flatten_helper
923  {
924  static _T2 func(const _T1 *_Multiplier, const _T2 *_Index) __GPU
925  {
926  _T2 _Offset = _Index[_Rank - 1];
927  for (int _I = 0; _I < (_Rank - 1); _I++)
928  {
929  _Offset += (_Multiplier[_I] * _Index[_I]);
930  }
931  return _Offset;
932  }
933  };
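
// Worked example (illustration only, assuming the caller seeds _Total_extent with the
// last dimension's extent): for extent<3>(2, 3, 4), _Array_init_helper produces
//
//   _Multiplier = { 12, 4 }   // 3*4 elements per slice, 4 elements per row
//   _Total_extent = 24
//
// and _Array_flatten_helper<3,...> maps index<3>(1, 2, 3) to the flat offset
// 12*1 + 4*2 + 3 = 23, i.e. the last element in row-major order.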
934 
935 
936  template<typename _T, int _Rank>
937  struct _Texture_read_helper;
938 
939  // rank == 1
940  template<typename _T>
941  struct _Texture_read_helper<_T, 1>
942  {
943  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Index, unsigned int _Mip_level) __GPU_ONLY
944  {
945  __dp_read_texture(_Tex_data, _Val, static_cast<unsigned int>(_Index[0]), 1, 1, _Mip_level);
946  }
947  };
948 
949  template<typename _T>
950  struct _Texture_read_helper<_T, 2>
951  {
952  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Index, unsigned int _Mip_level) __GPU_ONLY
953  {
954  __dp_read_texture(_Tex_data, _Val, static_cast<unsigned int>(_Index[1]), static_cast<unsigned int>(_Index[0]), 1, _Mip_level);
955  }
956  };
957 
958  template<typename _T>
959  struct _Texture_read_helper<_T, 3>
960  {
961  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Index, unsigned int _Mip_level) __GPU_ONLY
962  {
963  __dp_read_texture(_Tex_data, _Val, static_cast<unsigned int>(_Index[2]), static_cast<unsigned int>(_Index[1]), static_cast<unsigned int>(_Index[0]), _Mip_level);
964  }
965  };
966 
967  template<typename _T, int _Rank>
968  struct _Texture_write_helper;
969 
970  // rank == 1
971  template<typename _T>
972  struct _Texture_write_helper<_T, 1>
973  {
974  static void func(_Out_ void * _Tex_data, const void * _Ret, const _T & _Index) __GPU_ONLY
975  {
976  __dp_write_texture(_Tex_data, _Ret, static_cast<unsigned int>(_Index[0]), 1, 1);
977  }
978  };
979 
980  template<typename _T>
981  struct _Texture_write_helper<_T, 2>
982  {
983  static void func(_Out_ void * _Tex_data, const void * _Ret, const _T & _Index) __GPU_ONLY
984  {
985  __dp_write_texture(_Tex_data, _Ret, static_cast<unsigned int>(_Index[1]), static_cast<unsigned int>(_Index[0]), 1);
986  }
987  };
988 
989  template<typename _T>
990  struct _Texture_write_helper<_T, 3>
991  {
992  static void func(_Out_ void * _Tex_data, const void * _Ret, const _T & _Index) __GPU_ONLY
993  {
994  __dp_write_texture(_Tex_data, _Ret, static_cast<unsigned int>(_Index[2]), static_cast<unsigned int>(_Index[1]), static_cast<unsigned int>(_Index[0]));
995  }
996  };
997 
998  template<typename _T, int _Rank>
999  struct _Texture_sample_helper;
1000 
1001  // rank == 1
1002  template<typename _T>
1003  struct _Texture_sample_helper<_T, 1>
1004  {
1005  static void func(const void * _Tex_data, const void* _Sampler, _Out_ void * _Val, const _T & _Coord, unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1006  {
1007  __dp_sample_texture(_Tex_data, _Sampler, _Val, _Coord, 1.0f, 1.0f, _Kind, _Level_of_detail);
1008  }
1009  };
1010 
1011  template<typename _T>
1012  struct _Texture_sample_helper<_T, 2>
1013  {
1014  static void func(const void * _Tex_data, const void* _Sampler, _Out_ void * _Val, const _T & _Coord, unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1015  {
1016  __dp_sample_texture(_Tex_data, _Sampler, _Val, _Coord.x, _Coord.y, 1.0f, _Kind, _Level_of_detail);
1017  }
1018  };
1019 
1020  template<typename _T>
1021  struct _Texture_sample_helper<_T, 3>
1022  {
1023  static void func(const void * _Tex_data, const void* _Sampler, _Out_ void * _Val, const _T & _Coord, unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1024  {
1025  __dp_sample_texture(_Tex_data, _Sampler, _Val, _Coord.x, _Coord.y, _Coord.z, _Kind, _Level_of_detail);
1026  }
1027  };
1028 
1029  template<typename _T, int _Rank>
1030  struct _Texture_predefined_sample_helper;
1031 
1032  // rank == 1
1033  template<typename _T>
1034  struct _Texture_predefined_sample_helper<_T, 1>
1035  {
1036  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Coord, unsigned int _Predefined_sampler_id, unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1037  {
1038  __dp_sample_texture_predefined(_Tex_data, _Val, _Coord, 1.0f, 1.0f, _Predefined_sampler_id, _Kind, _Level_of_detail);
1039  }
1040  };
1041 
1042  template<typename _T>
1043  struct _Texture_predefined_sample_helper<_T, 2>
1044  {
1045  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Coord, unsigned int _Predefined_sampler_id,unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1046  {
1047  __dp_sample_texture_predefined(_Tex_data, _Val, _Coord.x, _Coord.y, 1.0f, _Predefined_sampler_id, _Kind, _Level_of_detail);
1048  }
1049  };
1050 
1051  template<typename _T>
1052  struct _Texture_predefined_sample_helper<_T, 3>
1053  {
1054  static void func(const void * _Tex_data, _Out_ void * _Val, const _T & _Coord, unsigned int _Predefined_sampler_id, unsigned int _Kind, float _Level_of_detail) __GPU_ONLY
1055  {
1056  __dp_sample_texture_predefined(_Tex_data, _Val, _Coord.x, _Coord.y, _Coord.z, _Predefined_sampler_id, _Kind, _Level_of_detail);
1057  }
1058  };
1059 
1060 
1061  // Retrieve these fields from arrays
1062  template <typename _Array_type>
1063  const _Buffer_descriptor& _Get_buffer_descriptor(const _Array_type& _Array) __GPU
1064  {
1065  return _Array._M_buffer_descriptor;
1066  }
1067 
1068  template <typename _Array_type>
1069  _Ret_ _Ubiquitous_buffer* _Get_buffer(const _Array_type& _Array) __CPU_ONLY
1070  {
1071  return _Array._Get_buffer();
1072  }
1073 
1074  // Helper method to access the private _Get_access method of the array
1075  // object which gets access to the underlying buffer on the array's accelerator_view
1076  // by synchronizing any pending modifications on other accelerator_views made through
1077  // array_views on the array
1078  template <typename _Array_type>
1079  _Event _Get_access_async(const _Array_type& _Array, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr) __CPU_ONLY
1080  {
1081  return _Array._Get_access_async(_Mode, _Buf_ptr);
1082  }
1083 
1084  // Helper method to obtain a unique ID for identifying the data source
1085  // underlying the array/array_view argument
1086  template <typename _Array_type>
1087  void *_Get_datasource_identity(const _Array_type &_Array)
1088  {
1089  return _Get_buffer_descriptor(_Array)._Get_buffer_ptr()._Get_ptr();
1090  }
1091 
1092  // Retrieve these fields from textures
1093  template <typename _Texture_type>
1094  const _Texture_descriptor& _Get_texture_descriptor(const _Texture_type& _Tex) __GPU
1095  {
1096  return _Tex._M_texture_descriptor;
1097  }
1098 
1099  template <typename _Texture_type>
1100  _Ret_ _Texture* _Get_texture(const _Texture_type& _Tex) __CPU_ONLY
1101  {
1102  return _Tex._Get_texture();
1103  }
1104 
1105  template<int _Rank, template <int> class _T1, template <int> class _T2>
1106  static void _Is_valid_section(
1107  const _T2<_Rank>& _Base_extent,
1108  const _T1<_Rank>& _Section_origin,
1109  const _T2<_Rank>& _Section_extent) __CPU_ONLY
1110  {
1111  _Is_nonnegative(_Section_origin);
1112  _Is_positive(_Section_extent);
1113 
1114  for (int i = 0; i < _Rank; ++i)
1115  {
1116  if ((_Section_origin[i] + _Section_extent[i]) > _Base_extent[i]) {
1117  throw runtime_exception("the specified section index and extent are out of bound", E_INVALIDARG);
1118  }
1119  }
1120  }
1121 
1122  template<int _Rank, template <int> class _T1, template <int> class _T2>
1123  static void _Is_valid_section(
1124  const _T2<_Rank>& /*_Base_extent*/,
1125  const _T1<_Rank>& /*_Section_origin*/,
1126  const _T2<_Rank>& /*_Section_extent*/) __GPU_ONLY
1127  {
1128  };
1129 
1130  template<int _Rank, template <int> class _T1>
1131  static void _Is_valid_projection(int _I, const _T1<_Rank>& _Base_extent) __CPU_ONLY
1132  {
1133  if ((_I < 0) || (_I >= _Base_extent[0])) {
1134  throw runtime_exception("the specified projection index is out of bound", E_INVALIDARG);
1135  }
1136  }
1137 
1138  template<int _Rank, template <int> class _T1>
1139  static void _Is_valid_projection(int /*_I*/, const _T1<_Rank>& /*_Base_extent*/) __GPU_ONLY
1140  {
1141  }
1142 
1143  template<int _Rank, template <int> class _T>
1144  static void _Is_positive(const _T<_Rank> &_Tuple) __CPU_ONLY
1145  {
1146  bool valid = true;
1147  for (int i = 0; i < _Rank; ++i)
1148  {
1149  if (_Tuple[i] <= 0) {
1150  valid = false;
1151  break;
1152  }
1153  }
1154 
1155  if (!valid) {
1156  throw runtime_exception("Invalid - values for each dimension must be > 0", E_INVALIDARG);
1157  }
1158  }
1159 
1160  // The GPU version is a no-op since there is no good exception-like mechanism on the GPU
1161  template<int _Rank, template <int> class _T>
1162  static void _Is_positive(const _T<_Rank> &/*_Tuple*/) __GPU_ONLY
1163  {
1164  }
1165 
1166 
1167  template<int _Rank, template <int> class _T>
1168  static void _Is_nonnegative(const _T<_Rank> &_Tuple) __CPU_ONLY
1169  {
1170  bool valid = true;
1171  for (int i = 0; i < _Rank; ++i)
1172  {
1173  if (_Tuple[i] < 0) {
1174  valid = false;
1175  break;
1176  }
1177  }
1178 
1179  if (!valid) {
1180  throw runtime_exception("Invalid - values for each dimension must be >= 0", E_INVALIDARG);
1181  }
1182  }
1183 
1184  // The GPU version is a no-op since there is no good exception-like mechanism on the GPU
1185  template<int _Rank, template <int> class _T>
1186  static void _Is_nonnegative(const _T<_Rank> & /*_Tuple*/) __GPU_ONLY
1187  {
1188  }
1189 
1190  // An extent is valid if the value in each dimension is > 0 and the
1191  // size of the extent does not exceed UINT_MAX
1192  // Important: This needs to be revisited whenever we change the return
1193  // type of extent.size()
1194  template<int _Rank, template <int> class _T>
1195  static void _Is_valid_extent(const _T<_Rank> &_Tuple) __CPU_ONLY
1196  {
1197  _Is_positive(_Tuple);
1198 
1199  bool totalSizeValid = true;
1200  unsigned long long totalSize = (unsigned long long)_Tuple[0];
1201 #pragma warning( push )
1202 #pragma warning( disable : 6294 )
1203  for (int i = 1; i < _Rank; ++i)
1204  {
1205  totalSize *= (unsigned long long)_Tuple[i];
1206  if (totalSize > UINT_MAX) {
1207  totalSizeValid = false;
1208  break;
1209  }
1210  }
1211 #pragma warning( pop )
1212 
1213  if (!totalSizeValid) {
1214  throw runtime_exception("Invalid - extent size exceeds UINT_MAX", E_INVALIDARG);
1215  }
1216  }
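
// Worked example (illustration only): an extent<3> of (2048, 2048, 1024) holds
// 2048 * 2048 * 1024 = 2^32 elements, which exceeds UINT_MAX (2^32 - 1), so the CPU
// path of _Is_valid_extent throws runtime_exception with E_INVALIDARG.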
1217 
1218  // The GPU version is a no-op since there is no good exception-like mechanism on the GPU
1219  template<int _Rank, template <int> class _T>
1220  static void _Is_valid_extent(const _T<_Rank> & /*_Tuple*/) __GPU_ONLY
1221  {
1222  }
1223 
1224  template<int _Rank>
1225  inline unsigned int _Get_max_mipmap_levels(const extent<_Rank> &_Extent)
1226  {
1227  unsigned int _Mipmap_levels = 0;
1228 
1229  // Find the largest dimension
1230  unsigned int _Max_dim = static_cast<unsigned int>(_Extent[0]);
1231  for(int _I=1; _I<_Rank; ++_I)
1232  {
1233  _Max_dim = static_cast<unsigned int>(_Extent[_I]) > _Max_dim ? static_cast<unsigned int>(_Extent[_I]) : _Max_dim;
1234  }
1235 
1236  // Find out how many times we can divide it by 2
1237  while(_Max_dim > 0)
1238  {
1239  _Mipmap_levels++;
1240  _Max_dim >>= 1;
1241  }
1242 
1243  return _Mipmap_levels;
1244  }
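
// Worked example (illustration only): for extent<2>(800, 600) the largest dimension is
// 800, and the halving loop runs once for each of 800, 400, 200, 100, 50, 25, 12, 6, 3, 1,
// so _Get_max_mipmap_levels returns 10 (a full mip chain down to 1x1).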
1245 
1246  inline void _Are_valid_mipmap_parameters(unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels = 0)
1247  {
1248  if (_Most_detailed_mipmap_level >= 32)
1249  {
1250  throw runtime_exception("The most detailed mipmap level cannot be greater than or equal to 32", E_INVALIDARG);
1251  }
1252 
1253  if (_Mipmap_levels > 32)
1254  {
1255  throw runtime_exception("The number of mipmap levels cannot be greater than 32", E_INVALIDARG);
1256  }
1257  }
1258 
1259  template<int _Rank>
1260  inline extent<_Rank> _Get_extent_at_level_unsafe(const extent<_Rank> &_Base_extent, unsigned int _Level) __GPU;
1261 
1262  template<int _Rank>
1263  inline extent<_Rank> _Get_extent_at_level(const extent<_Rank> &_Base_extent, unsigned int _Level);
1264 
1265  // Validate most detailed level and mipmap levels of the new view, given number of mipmap levels on the source
1266  inline void _Is_valid_mipmap_range(unsigned int _Src_view_mipmap_levels, unsigned int _Dst_most_detailed_level, unsigned int _Dst_view_mipmap_levels)
1267  {
1268  _Are_valid_mipmap_parameters(_Dst_most_detailed_level, _Dst_view_mipmap_levels);
1269 
1270  if (_Dst_view_mipmap_levels == 0 || _Src_view_mipmap_levels < _Dst_most_detailed_level + _Dst_view_mipmap_levels)
1271  {
1272  throw runtime_exception("Invalid texture mipmap range", E_INVALIDARG);
1273  }
1274  }
1275 
1276  // _Parallel_for_each declarations used by the Concurrency::parallel_for_each functions
1277  template <int _Rank, typename _Kernel_type>
1278  void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, extent<_Rank> _Compute_domain, const _Kernel_type &_F);
1279 
1280  template <int _Dim0, int _Dim1, int _Dim2, typename _Kernel_type>
1281  void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, tiled_extent<_Dim0, _Dim1, _Dim2> _Compute_domain, const _Kernel_type &_F);
1282 
1283  template <int _Dim0, int _Dim1, typename _Kernel_type>
1284  void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, tiled_extent<_Dim0, _Dim1> _Compute_domain, const _Kernel_type &_F);
1285 
1286  template <int _Dim0, typename _Kernel_type>
1287  void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, tiled_extent<_Dim0> _Compute_domain, const _Kernel_type &_F);
1288 
1289 } // namespace Concurrency::details
1290 
1291 } // namespace Concurrency
1292 
1293 //=============================================================================
1294 // Internal Intrinsic Functions used for implementing libraries
1295 //=============================================================================
1296 
1297 extern "C"
1298 {
1299 
1300 //=============================================================================
1301 // Intrinsics that map to D3D HLSL intrinsics
1302 //=============================================================================
1303 int __dp_d3d_absi(int) __GPU_ONLY;
1304 void __dp_d3d_all_memory_fence() __GPU_ONLY;
1305 void __dp_d3d_all_memory_fence_with_tile_barrier() __GPU_ONLY;
1306 float __dp_d3d_clampf(float, float, float) __GPU_ONLY;
1307 int __dp_d3d_clampi(int, int, int) __GPU_ONLY;
1308 unsigned int __dp_d3d_countbitsu(unsigned int) __GPU_ONLY;
1309 void __dp_d3d_device_memory_fence() __GPU_ONLY;
1310 void __dp_d3d_device_memory_fence_with_tile_barrier() __GPU_ONLY;
1311 int __dp_d3d_firstbithighi(int) __GPU_ONLY;
1312 int __dp_d3d_firstbitlowi(int) __GPU_ONLY;
1313 unsigned int __dp_d3d_interlocked_add(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1314 unsigned int __dp_d3d_interlocked_and(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1315 unsigned int __dp_d3d_interlocked_exchange(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1316 unsigned int __dp_d3d_interlocked_or(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1317 unsigned int __dp_d3d_interlocked_xor(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1318 unsigned int __dp_d3d_interlocked_compare_exchange(_Inout_ unsigned int *, unsigned int, unsigned int) __GPU_ONLY;
1319 unsigned int __dp_d3d_interlocked_max_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1320 int __dp_d3d_interlocked_max_int(_Inout_ int *, int) __GPU_ONLY;
1321 unsigned int __dp_d3d_interlocked_min_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY;
1322 int __dp_d3d_interlocked_min_int(_Inout_ int *, int) __GPU_ONLY;
1323 float __dp_d3d_madf(float, float, float) __GPU_ONLY;
1324 double __dp_d3d_madd(double, double, double) __GPU_ONLY;
1325 int __dp_d3d_madi(int, int, int) __GPU_ONLY;
1326 int __dp_d3d_mini(int, int) __GPU_ONLY;
1327 int __dp_d3d_maxi(int, int) __GPU_ONLY;
1328 unsigned int __dp_d3d_minu(unsigned int, unsigned int) __GPU_ONLY;
1329 unsigned int __dp_d3d_maxu(unsigned int, unsigned int) __GPU_ONLY;
1330 unsigned int __dp_d3d_madu(unsigned int, unsigned int, unsigned int) __GPU_ONLY;
1331 float __dp_d3d_noisef(float) __GPU_ONLY;
1332 float __dp_d3d_radiansf(float) __GPU_ONLY;
1333 float __dp_d3d_rcpf(float) __GPU_ONLY;
1334 unsigned int __dp_d3d_reversebitsu(unsigned int) __GPU_ONLY;
1335 float __dp_d3d_saturatef (float) __GPU_ONLY;
1336 int __dp_d3d_signi(int) __GPU_ONLY;
1337 float __dp_d3d_smoothstepf(float, float, float) __GPU_ONLY;
1338 float __dp_d3d_stepf(float, float) __GPU_ONLY;
1339 void __dp_d3d_tile_static_memory_fence() __GPU_ONLY;
1340 void __dp_d3d_tile_static_memory_fence_with_tile_barrier() __GPU_ONLY;
1341 void __dp_d3d_msad4(_Out_ unsigned int * /* pointer to the return value */,
1342  unsigned int /* reference */,
1343  unsigned int /* source.x */,
1344  unsigned int /* source.y */,
1345  unsigned int /* accum.x */,
1346  unsigned int /* accum.y */,
1347  unsigned int /* accum.z */,
1348  unsigned int /* accum.w */) __GPU_ONLY;
1349 
1350 //=============================================================================
1351 // Intrinsics that serve as the FE/BE interface
1352 //=============================================================================
1353 
1354 // C++ AMP stub only internal intrinsics
1355 void __dp_stub_info(unsigned int /* x */,
1356  unsigned int /* y */,
1357  unsigned int /* z */,
1358  unsigned int /* group forall? */) __GPU_ONLY;
1359 
1360 _Ret_ void * __dp_get_buffer_info(bool /* read write? */,
1361  unsigned int /* unique id */) __GPU_ONLY;
1362 
1363 _Ret_ void * __dp_get_texture_info(bool /* read write? */,
1364  unsigned int, /* rank */
1365  unsigned int, /* base type: 0 - int, 1 - uint, 2 - float*/
1366  unsigned int, /* num of channels */
1367  unsigned int /* unique id */) __GPU_ONLY;
1368 
1369 _Ret_ void * __dp_get_sampler_info(unsigned int /* unique id */) __GPU_ONLY;
1370 
1371 void __dp_init_entry_var(_Out_ unsigned int * /* pointer to the entry symbol */,
1372  unsigned int /* constant buffer id */,
1373  unsigned int /* start pos */,
1374  unsigned int /* end pos */) __GPU_ONLY;
1375 
1376 void __dp_entry_var_ptr_info(unsigned int /* pos of a ptr */) __GPU_ONLY;
1377 
1378 void __dp_const_buffer_info(unsigned int /* unique id */, unsigned int /* size */) __GPU_ONLY;
1379 
1380 unsigned int __dp_read_const_buffer(unsigned int /* const buffer id */, unsigned int /* index */) __GPU_ONLY;
1381 
1382 unsigned int __dp_get_physical_id(
1383  unsigned int /* 0 - gid, 1 - tid, 2 - dtid */,
1384  unsigned int /* 0 - x, 1 - y, 2 - z */) __GPU_ONLY;
1385 
1386 // This intrinsic is used to aid line number debug info generation for C++ AMP kernels
1387 void __dp_no_source_stub() __GPU_ONLY;
1388 
1389 // This intrinsic is used to pass the call site info
1390 void __dp_call_site_info(const char *, int) __GPU_ONLY;
1391 }
1392 
1393 namespace Concurrency
1394 {
1395  namespace details
1396  {
1397  // This function is used to aid line number debug info generation for C++ AMP kernels
1398  // and is called by the C++ AMP FE in the generated kernel_stub for parallel_for_each calls.
1399  inline void __dp_no_source_func() __GPU_ONLY
1400  {
1401  __dp_no_source_stub();
1402  }
1403  }
1404 }
1405 
1406 namespace concurrency = Concurrency;
1407 
1408 #pragma pack(pop)