STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
amp_graphics.h
Go to the documentation of this file.
1 /***
2 * ==++==
3 *
4 * Copyright (c) Microsoft Corporation. All rights reserved.
5 *
6 * ==--==
7 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
8 *
9 * amp_graphics.h
10 *
11 * C++ AMP Graphics Library
12 *
13 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
14 ****/
15 
16 #pragma once
17 
18 #include <amp_short_vectors.h>
19 #include <array>
20 #include <dxgiformat.h>
21 #include <sstream>
22 
23 #define _AMP_GRAPHICS_H
24 
25 namespace Concurrency
26 {
27 
28 namespace graphics
29 {
30 
31 namespace details
32 {
33 
34 #pragma warning( push )
35 #pragma warning( disable : 6326 ) // Potential comparison of a constant with another constant
36 
37 template<typename _Ty>
39 {
40  typedef void _Scalar_type;
41  static const bool _Is_valid_SVT_for_texture = false;
43  static const unsigned int _Num_channels = 0;
44  static const unsigned int _Default_bits_per_channel = 0;
45 };
46 
47 template<>
49 {
50  typedef unsigned int _Scalar_type;
51  static const bool _Is_valid_SVT_for_texture = true;
53  static const unsigned int _Num_channels = 1;
54  static const unsigned int _Default_bits_per_channel = 32;
55 };
56 
57 template<>
59 {
61  static const bool _Is_valid_SVT_for_texture = true;
63  static const unsigned int _Num_channels = 2;
64  static const unsigned int _Default_bits_per_channel = 32;
65 };
66 
67 template<>
69 {
71  static const bool _Is_valid_SVT_for_texture = true;
73  static const unsigned int _Num_channels = 3;
74  static const unsigned int _Default_bits_per_channel = 32;
75 };
76 
77 template<>
79 {
81  static const bool _Is_valid_SVT_for_texture = true;
83  static const unsigned int _Num_channels = 4;
84  static const unsigned int _Default_bits_per_channel = 32;
85 };
86 
87 template<>
89 {
90  typedef int _Scalar_type;
91  static const bool _Is_valid_SVT_for_texture = true;
93  static const unsigned int _Num_channels = 1;
94  static const unsigned int _Default_bits_per_channel = 32;
95 };
96 
97 template<>
99 {
101  static const bool _Is_valid_SVT_for_texture = true;
103  static const unsigned int _Num_channels = 2;
104  static const unsigned int _Default_bits_per_channel = 32;
105 };
106 
107 template<>
109 {
111  static const bool _Is_valid_SVT_for_texture = true;
113  static const unsigned int _Num_channels = 3;
114  static const unsigned int _Default_bits_per_channel = 32;
115 };
116 
117 template<>
119 {
121  static const bool _Is_valid_SVT_for_texture = true;
123  static const unsigned int _Num_channels = 4;
124  static const unsigned int _Default_bits_per_channel = 32;
125 };
126 
127 
128 template<>
130 {
131  typedef float _Scalar_type;
132  static const bool _Is_valid_SVT_for_texture = true;
134  static const unsigned int _Num_channels = 1;
135  static const unsigned int _Default_bits_per_channel = 32;
136 };
137 
138 template<>
140 {
142  static const bool _Is_valid_SVT_for_texture = true;
144  static const unsigned int _Num_channels = 2;
145  static const unsigned int _Default_bits_per_channel = 32;
146 };
147 
148 template<>
150 {
152  static const bool _Is_valid_SVT_for_texture = true;
154  static const unsigned int _Num_channels = 3;
155  static const unsigned int _Default_bits_per_channel = 32;
156 };
157 
158 template<>
160 {
162  static const bool _Is_valid_SVT_for_texture = true;
164  static const unsigned int _Num_channels = 4;
165  static const unsigned int _Default_bits_per_channel = 32;
166 };
167 
168 template<>
170 {
172  static const bool _Is_valid_SVT_for_texture = true;
174  static const unsigned int _Num_channels = 1;
175  static const unsigned int _Default_bits_per_channel = 16;
176 };
177 
178 template<>
180 {
182  static const bool _Is_valid_SVT_for_texture = true;
184  static const unsigned int _Num_channels = 2;
185  static const unsigned int _Default_bits_per_channel = 16;
186 };
187 
188 template<>
190 {
192  static const bool _Is_valid_SVT_for_texture = false;
194  static const unsigned int _Num_channels = 0;
195  static const unsigned int _Default_bits_per_channel = 0;
196 };
197 
198 template<>
200 {
202  static const bool _Is_valid_SVT_for_texture = true;
204  static const unsigned int _Num_channels = 4;
205  static const unsigned int _Default_bits_per_channel = 16;
206 };
207 
208 template<>
210 {
212  static const bool _Is_valid_SVT_for_texture = true;
214  static const unsigned int _Num_channels = 1;
215  static const unsigned int _Default_bits_per_channel = 16;
216 };
217 
218 template<>
220 {
222  static const bool _Is_valid_SVT_for_texture = true;
224  static const unsigned int _Num_channels = 2;
225  static const unsigned int _Default_bits_per_channel = 16;
226 };
227 
228 template<>
230 {
232  static const bool _Is_valid_SVT_for_texture = false;
234  static const unsigned int _Num_channels = 0;
235  static const unsigned int _Default_bits_per_channel = 0;
236 };
237 
238 template<>
240 {
242  static const bool _Is_valid_SVT_for_texture = true;
244  static const unsigned int _Num_channels = 4;
245  static const unsigned int _Default_bits_per_channel = 16;
246 };
247 
248 
249 template<>
251 {
252  typedef double _Scalar_type;
253  static const bool _Is_valid_SVT_for_texture = true;
255  static const unsigned int _Num_channels = 2;
256  static const unsigned int _Default_bits_per_channel = 32;
257 };
258 
259 template<>
261 {
263  static const bool _Is_valid_SVT_for_texture = true;
265  static const unsigned int _Num_channels = 4;
266  static const unsigned int _Default_bits_per_channel = 32;
267 };
268 
269 template<>
271 {
273  static const bool _Is_valid_SVT_for_texture = false;
275  static const unsigned int _Num_channels = 0;
276  static const unsigned int _Default_bits_per_channel = 0;
277 };
278 
279 template<>
281 {
283  static const bool _Is_valid_SVT_for_texture = false;
285  static const unsigned int _Num_channels = 0;
286  static const unsigned int _Default_bits_per_channel = 0;
287 };
288 
289 template<typename _Short_vector_type>
291 {
295 }
296 
297 template<int _Rank>
298 std::array<size_t, 3> _Get_dimensions(const Concurrency::extent<_Rank> & _Ext, unsigned int _Mip_offset)
299 {
300  std::array<size_t, 3> _Arr;
301  // For un-used dimensions, use value 1.
302  switch((_Rank)) {
303  case 1:
304  _Arr[0] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
305  _Arr[1] = 1;
306  _Arr[2] = 1;
307  break;
308  case 2:
309  _Arr[0] = static_cast<size_t>((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U);
310  _Arr[1] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
311  _Arr[2] = 1;
312  break;
313  case 3:
314  _Arr[0] = static_cast<size_t>((_Ext[2] >> _Mip_offset) ? (_Ext[2] >> _Mip_offset) : 1U);
315  _Arr[1] = static_cast<size_t>((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U);
316  _Arr[2] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
317  break;
318  default:
319  _ASSERTE(false);
320  _Arr[0] = 1;
321  _Arr[1] = 1;
322  _Arr[2] = 1;
323  break;
324  }
325  return _Arr;
326 }
327 
328 template <int _Rank>
329 std::array<size_t, 3> _Get_indices(const index<_Rank> &_Idx)
330 {
331  std::array<size_t, 3> _Arr;
332  // For un-used dimensions, use value 0.
333  switch((_Rank)) {
334  case 1:
335  _Arr[0] = static_cast<size_t>(_Idx[0]);
336  _Arr[1] = 0;
337  _Arr[2] = 0;
338  break;
339  case 2:
340  _Arr[0] = static_cast<size_t>(_Idx[1]);
341  _Arr[1] = static_cast<size_t>(_Idx[0]);
342  _Arr[2] = 0;
343  break;
344  case 3:
345  _Arr[0] = static_cast<size_t>(_Idx[2]);
346  _Arr[1] = static_cast<size_t>(_Idx[1]);
347  _Arr[2] = static_cast<size_t>(_Idx[0]);
348  break;
349  default:
350  _ASSERTE(false);
351  _Arr[0] = 0;
352  _Arr[1] = 0;
353  _Arr[2] = 0;
354  break;
355  }
356  return _Arr;
357 }
358 
359 template<int _Rank>
360 Concurrency::extent<_Rank> _Create_extent(size_t _Width, size_t _Height, size_t _Depth)
361 {
362  extent<_Rank> _Ext;
363  switch((_Rank)) {
364  case 1:
365  _Ext[0] = static_cast<int>(_Width);
366  break;
367  case 2:
368  _Ext[0] = static_cast<int>(_Height);
369  _Ext[1] = static_cast<int>(_Width);
370  break;
371  case 3:
372  _Ext[0] = static_cast<int>(_Depth);
373  _Ext[1] = static_cast<int>(_Height);
374  _Ext[2] = static_cast<int>(_Width);
375  break;
376  default:
377  _ASSERTE(false);
378  break;
379  }
380  return _Ext;
381 }
382 
383 // forward declaration
384 template <typename _Value_type, int _Rank> class _Texture_base;
385 template <typename _Value_type, int _Rank>
386 _Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Offset, const Concurrency::extent<_Rank> &_Copy_extent);
387 template <typename OutputIterator, typename _Value_type, int _Rank>
388 _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, OutputIterator _Dest_iter);
389 
390 template<typename _Value_type, int _Rank>
392  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent);
393 
394 // The base class for texture, writeonly_texture_view
395 template <typename _Value_type, int _Rank>
396 class _Texture_base
397 {
398  static_assert(_Rank > 0 && _Rank <= 3, "texture is only supported for rank 1, 2, and 3.");
399  static_assert(_Short_vector_type_traits<typename std::remove_const<_Value_type>::type>::_Is_valid_SVT_for_texture, "invalid value_type for a texture.");
400 
401  // Friends
402  template<typename _T>
404  template<typename _T>
406  template<typename _Value_type, int _Rank>
407  friend _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset,
408  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) __CPU_ONLY;
409 
410 public:
411  static const int rank = _Rank;
412  typedef _Value_type value_type;
414 
415 public:
419  __declspec(property(get=get_extent)) Concurrency::extent<_Rank> extent;
420  Concurrency::extent<_Rank> get_extent() const __GPU
421  {
422  return _M_extent;
423  }
424 
431  Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __CPU_ONLY
432  {
433  if (_Mipmap_level >= this->get_mipmap_levels())
434  {
435  std::stringstream _Err_msg;
436  _Err_msg << "Value for _Mipmap_level parameter (" << _Mipmap_level
437  << ") cannot be greater than or equal to number of mipmap levels ("
438  << this->get_mipmap_levels() << ") on the texture or texture view";
439 
440  throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG);
441  }
443  }
444 
451  Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __GPU_ONLY
452  {
454  }
455 
460  Concurrency::accelerator_view get_accelerator_view() const __CPU_ONLY
461  {
463  }
464 
468  __declspec(property(get=get_bits_per_scalar_element)) unsigned int bits_per_scalar_element;
469  unsigned int get_bits_per_scalar_element() const __CPU_ONLY
470  {
471  unsigned int _Bits_per_channel = _Get_texture()->_Get_bits_per_channel();
472  return _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Bits_per_channel * (sizeof(double)/sizeof(int)) : _Bits_per_channel;
473  }
474 
481  __declspec(property(get=get_mipmap_levels)) unsigned int mipmap_levels;
482  unsigned int get_mipmap_levels() const __GPU
483  {
485  }
486 
492  __declspec(property(get=get_data_length)) unsigned int data_length;
493  unsigned int get_data_length() const __CPU_ONLY
494  {
496  }
497 
498 protected:
499  // internal storage abstraction
501 
502  _Texture_base() __CPU_ONLY
503  {
504  // This default ctor is required to enable move ctor for a derived types,
505  // empty _Texture_base is later initialized by move assignment operator
506  }
507 
508  _Texture_base(const Concurrency::extent<_Rank>& _Ext, unsigned int _Mipmap_levels = 1) __CPU_ONLY
509  : _M_extent(_Ext), _M_texture_descriptor(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels)
510  {
512  _Are_valid_mipmap_parameters(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels);
513 
514  // Validate if we can generate _Mipmap_levels number of mipmap levels given the dimensionality of the texture
515  unsigned int _Max_mipmap_levels = _Get_max_mipmap_levels(_M_extent);
516  if (_Mipmap_levels > _Max_mipmap_levels)
517  {
518  std::stringstream _Err_msg;
519  _Err_msg << "The texture extent is too small to generate (" << _Mipmap_levels << ") mipmap levels, the maximum allowed is (" << _Max_mipmap_levels << ")";
520  throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG);
521  }
522  else if (_Mipmap_levels == 0)
523  {
524  // Generate full range of all mipmaps
525  // e.g. 2D 10x2 texture would have: 10x2, 5x1, 2x1, 1x1 (4 mipmap levels)
526  _Mipmap_levels = _Max_mipmap_levels;
527  }
528  _M_texture_descriptor._Set_view_mipmap_levels(_Mipmap_levels);
529  }
530 
531  // shallow copy for texture_views
532  _Texture_base(const _Texture_base & _Src) __GPU
533  : _M_extent(_Src._M_extent), _M_texture_descriptor(_Src._M_texture_descriptor)
534  {
535  }
536 
537  // shallow copy for texture_views that redefine range of mipmaps
538  _Texture_base(const _Texture_base & _Src, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __CPU_ONLY
539  : _M_extent(_Get_extent_at_level(_Src.extent, _Most_detailed_mipmap_level)), _M_texture_descriptor(_Src._M_texture_descriptor, _Src._Get_most_detailed_mipmap_level() + _Most_detailed_mipmap_level, _View_mipmap_levels)
540  {
541  Concurrency::details::_Is_valid_mipmap_range(_Src.get_mipmap_levels(), _Most_detailed_mipmap_level, _View_mipmap_levels);
542  }
543 
544  // shallow copy for texture_views that in restrict(amp) context, the texture views can no longer redefine mipmap range,
545  // but read-write texture view needs to flatten to single mipmap level when created over a texture with multiple mipmap levels.
546  _Texture_base(const _Texture_base & _Src, bool _Flatten_mipmap_levels) __GPU_ONLY
547  : _M_extent(_Src.extent), _M_texture_descriptor(_Src._M_texture_descriptor, /*_Most_detailed_mipmap_level=*/0, _Flatten_mipmap_levels ? /*_View_mipmap_levels=*/1 : _Src.get_mipmap_levels())
548  {
549  }
550 
551  // interop
552  _Texture_base(const Concurrency::extent<_Rank>& _Ext, const _Texture_descriptor & _Desc) __CPU_ONLY
553  : _M_extent(_Ext), _M_texture_descriptor(_Desc)
554  {
556  }
557 
558  void _Copy_to(const _Texture_base & _Dest) const __CPU_ONLY
559  {
560  if (!(*this == _Dest))
561  {
562  _ASSERTE(this->extent == _Dest.extent);
563  details::_Copy_async_impl(*this, index<_Rank>(), _Dest, index<_Rank>(), _Dest.extent)._Get();
564  }
565  }
566 
567  bool operator==(const _Texture_base & _Other) const __CPU_ONLY
568  {
569  return _Other._M_extent == _M_extent && _Other._M_texture_descriptor == _M_texture_descriptor;
570  }
571 
573  {
574  }
575 
576  _Ret_ _Texture* _Get_texture() const __CPU_ONLY
577  {
579  }
580 
581  unsigned int _Get_most_detailed_mipmap_level() const __GPU
582  {
584  }
585 
586  bool _Are_mipmap_levels_overlapping(const _Texture_base &_Other) const __CPU_ONLY
587  {
588  return _M_texture_descriptor._Are_mipmap_levels_overlapping(&_Other._M_texture_descriptor);
589  }
590 
591 protected:
593  _Texture_descriptor _M_texture_descriptor;
594 };
595 
596 inline void _Is_valid_data_length(unsigned int _Num_elems, unsigned int _Bits_per_elem)
597 {
598  unsigned long long _Bytes_per_elem = static_cast<unsigned long long>(_Bits_per_elem / 8U);
599  unsigned long long _Total_bytes = static_cast<unsigned long long>(_Num_elems) * _Bytes_per_elem;
600  if (_Total_bytes > static_cast<unsigned long long>(UINT_MAX))
601  {
602  throw runtime_exception("Invalid - texture data_length exceeds UINT_MAX", E_INVALIDARG);
603  }
604 }
605 
// SFINAE-based iterator detector: 'value' is true exactly when _Iterator
// supports both dereference (*it) and pre-increment (++it) -- the minimal
// operations this library needs from a source iterator.
// NOTE(review): the struct declaration line (presumably
// "struct _Is_iterator") appears to be missing from this excerpt of the
// header -- confirm against the original file.
template<typename _Iterator>
{
 // Preferred overload: viable only when *_Val and ++_Val are well-formed
 // expressions (the comma-expression decltype collapses to int).
 template<class _Uty> static auto _Fn(_Uty _Val, decltype(*_Val, ++_Val, 0)) -> std::true_type;
 // Fallback overload: chosen via the ellipsis when the one above is ill-formed.
 template<class _Uty> static auto _Fn(_Uty _Val, ...) -> std::false_type;
 // Passing the literal 0 prefers the int-taking overload whenever both are viable.
 static constexpr bool value = decltype(_Fn(std::declval<_Iterator>(),0))::value;
};
613 
614 } // namespace details
615 
616 
618 
619 // forward declarations
620 template <typename _Value_type, int _Rank>
621 class texture;
622 template <typename _Value_type, int _Rank>
624 template <typename _Value_type, int _Rank>
626 class sampler;
627 
628 namespace direct3d
629 {
630 template<typename _Value_type, int _Rank>
631 texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format = DXGI_FORMAT_UNKNOWN) __CPU_ONLY;
632 
633 sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY;
635 
636 } // namespace direct3d
637 
651 template <typename _Value_type, int _Rank> class texture : public details::_Texture_base<_Value_type, _Rank>
652 {
653  template<typename _Value_type, int _Rank>
654  friend texture<_Value_type,_Rank> direct3d::make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format) __CPU_ONLY;
655 
656  static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for texture.");
657 
658 public:
659 
666  texture(const Concurrency::extent<_Rank>& _Ext) __CPU_ONLY
667  : _Texture_base(_Ext)
668  {
669  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
670  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
672  }
673 
680  texture(int _E0) __CPU_ONLY
682  {
683  static_assert(_Rank == 1, "texture(int) is only permissible on texture<value_type, 1>.");
684  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
685  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
687  }
688 
698  texture(int _E0, int _E1) __CPU_ONLY
700  {
701  static_assert(_Rank == 2, "texture(int, int) is only permissible on texture<value_type, 2>.");
702  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
703  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
705  }
706 
719  texture(int _E0, int _E1, int _E2) __CPU_ONLY
720  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
721  {
722  static_assert(_Rank == 3, "texture(int, int, int) is only permissible on texture<value_type, 3>.");
723  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
724  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
726  }
727 
738  : _Texture_base(_Ext)
739  {
740  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
741  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
742  _Initialize(_Av);
743  }
744 
760  texture(const Concurrency::extent<_Rank>& _Ext, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
761  : _Texture_base(_Ext)
762  {
763  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
764  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
765  _Initialize(_Av, _Associated_av);
766  }
767 
777  texture(int _E0, const Concurrency::accelerator_view& _Av) __CPU_ONLY
779  {
780  static_assert(_Rank == 1, "texture(int, accelerator_view) is only permissible on texture<value_type, 1>.");
781  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
782  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
783  _Initialize(_Av);
784  }
785 
801  texture(int _E0, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
803  {
804  static_assert(_Rank == 1, "texture(int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
805  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
806  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
807  _Initialize(_Av, _Associated_av);
808  }
809 
822  texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av) __CPU_ONLY
824  {
825  static_assert(_Rank == 2, "texture(int, int, accelerator_view) is only permissible on texture<value_type, 2>.");
826  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
827  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
828  _Initialize(_Av);
829  }
830 
849  texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
851  {
852  static_assert(_Rank == 2, "texture(int, int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
853  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
854  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
855  _Initialize(_Av, _Associated_av);
856  }
857 
873  texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av) __CPU_ONLY
874  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
875  {
876  static_assert(_Rank == 3, "texture(int, int, int, accelerator_view) is only permissible on texture<value_type, 3>.");
877  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
878  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
879  _Initialize(_Av);
880  }
881 
903  texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
904  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
905  {
906  static_assert(_Rank == 3, "texture(int, int, int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
907  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
908  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
909  _Initialize(_Av, _Associated_av);
910  }
911 
924  template<typename _Input_iterator,
925  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
926  texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
927  : _Texture_base(_Ext)
928  {
929  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
930  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
931  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
932  }
933 
946  template<typename _Input_iterator,
947  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
948  texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
950  {
951  static_assert(_Rank == 1, "texture(int, iterator, iterator) is only permissible on texture<value_type, 1>.");
952  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
953  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
954  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
955  }
956 
972  template<typename _Input_iterator,
973  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
974  texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
976  {
977  static_assert(_Rank == 2, "texture(int, int, iterator, iterator) is only permissible on texture<value_type, 2>.");
978  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
979  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
980  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
981  }
982 
983 
1002  template<typename _Input_iterator,
1003  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1004  texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
1005  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1006  {
1007  static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator) is only permissible on texture<value_type, 3>.");
1008  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1009  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1010  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
1011  }
1012 
1028  template<typename _Input_iterator,
1029  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1030  texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1031  : _Texture_base(_Ext)
1032  {
1033  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1034  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1035  _Initialize(_Av, _Src_first, _Src_last);
1036  }
1037 
1059  template<typename _Input_iterator,
1060  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1061  texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1062  : _Texture_base(_Ext)
1063  {
1064  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1065  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1066  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1067  }
1068 
1084  template<typename _Input_iterator,
1085  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1086  texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1088  {
1089  static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 1>.");
1090  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1091  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1092  _Initialize(_Av, _Src_first, _Src_last);
1093  }
1094 
1116  template<typename _Input_iterator,
1117  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1118  texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1120  {
1121  static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1122  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1123  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1124  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1125  }
1126 
1145  template<typename _Input_iterator,
1146  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1147  texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1149  {
1150  static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 2>.");
1151  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1152  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1153  _Initialize(_Av, _Src_first, _Src_last);
1154  }
1155 
1180  template<typename _Input_iterator,
1181  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1182  texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1184  {
1185  static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1186  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1187  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1188  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1189  }
1190 
// Rank-3 constructor: builds a texture of extent (_E0, _E1, _E2) on accelerator
// view _Av and copies the element range [_Src_first, _Src_last) into it.
// Unorm/norm short-vector value types are rejected at compile time.
1212  template<typename _Input_iterator,
1213  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1214  texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1215  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1216  {
1217  static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 3>.");
1218  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1219  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1220  _Initialize(_Av, _Src_first, _Src_last);
1221  }
1222 
// Rank-3 constructor with a staging/associated accelerator view: data from
// [_Src_first, _Src_last) is copied in; _Av and _Associated_av are forwarded
// to _Initialize, which decides whether a staging texture is needed.
1250  template<typename _Input_iterator,
1251  typename = typename std::enable_if<details::_Is_iterator<_Input_iterator>::value>::type>
1252  texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1253  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1254  {
1255  static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
1256  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1257  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1258  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1259  }
1260 
// Construct an uninitialized texture of extent _Ext on the default
// accelerator's default view. _Bits_per_scalar_element is validated by
// _Initialize (only 8, 16, 32, or 64 are accepted).
1273  texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1274  : _Texture_base(_Ext)
1275  {
1276  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1277  }
1278 
// Same as above, but with an explicit mipmap level count forwarded to the
// _Texture_base subobject.
1295  texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels) __CPU_ONLY
1296  : _Texture_base(_Ext, _Mipmap_levels)
1297  {
1298  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1299  }
1300 
1313  texture(int _E0, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1315  {
1316  static_assert(_Rank == 1, "texture(int, unsigned int) is only permissible on texture<value_type, 1>.");
1317  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1318  }
1319 
1335  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1337  {
1338  static_assert(_Rank == 2, "texture(int, int, unsigned int) is only permissible on texture<value_type, 2>.");
1339  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1340  }
1341 
// Rank-3 uninitialized texture on the default accelerator's default view with
// an explicit bits-per-scalar-element (8/16/32/64, checked in _Initialize).
1363  texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1364  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1365  {
1366  static_assert(_Rank == 3, "texture(int, int, int, unsigned int) is only permissible on texture<value_type, 3>.");
1367  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1368  }
1369 
1370 
// Uninitialized texture of extent _Ext on a caller-supplied accelerator view.
1386  texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1387  : _Texture_base(_Ext)
1388  {
1389  _Initialize(_Av, _Bits_per_scalar_element);
1390  }
1391 
// Uninitialized texture with explicit mipmap level count on accelerator view
// _Av. Note: _Initialize rejects staging textures with mipmap levels > 1.
1411  texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1412  : _Texture_base(_Ext, _Mipmap_levels)
1413  {
1414  _Initialize(_Av, _Bits_per_scalar_element);
1415  }
1416 
// Uninitialized texture bound to _Av with an associated accelerator view;
// a staging texture is created when _Av is the CPU accelerator and
// _Associated_av is not (see _Should_create_staging_texture).
1438  texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1439  : _Texture_base(_Ext)
1440  {
1441  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1442  }
1443 
1459  texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1461  {
1462  static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view) is only permissible on texture<value_type, 1>.");
1463  _Initialize(_Av, _Bits_per_scalar_element);
1464  }
1465 
1487  texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1489  {
1490  static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1491  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1492  }
1493 
1512  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1514  {
1515  static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view) is only permissible on texture<value_type, 2>.");
1516  _Initialize(_Av, _Bits_per_scalar_element);
1517  }
1518 
1543  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1545  {
1546  static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1547  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1548  }
1549 
// Rank-3 uninitialized texture on an explicit accelerator view.
1571  texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1572  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1573  {
1574  static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view) is only permissible on texture<value_type, 3>.");
1575  _Initialize(_Av, _Bits_per_scalar_element);
1576  }
1577 
// Rank-3 uninitialized texture with both a target and an associated
// accelerator view.
1605  texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1606  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1607  {
1608  static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
1609  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1610  }
1611 
// Construct on the default accelerator's default view and fill from a raw
// host buffer of _Src_byte_size bytes at _Source.
1630  texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1631  : _Texture_base(_Ext)
1632  {
1633  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1634  }
1635 
1654  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1656  {
1657  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 1>.");
1658  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1659  }
1660 
1682  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1684  {
1685  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 2>.");
1686  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1687  }
1688 
1689 
// Rank-3 variant: construct on the default accelerator's default view and
// fill from the raw host buffer _Source (_Src_byte_size bytes).
1714  texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1715  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1716  {
1717  static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 3>.");
1718  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1719  }
1720 
// Construct on accelerator view _Av and fill from the raw host buffer
// _Source (_Src_byte_size bytes).
1742  texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1743  : _Texture_base(_Ext)
1744  {
1745  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1746  }
1747 
// Same as above with an associated accelerator view for staging scenarios.
1775  texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1776  : _Texture_base(_Ext)
1777  {
1778  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1779  }
1780 
1802  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1804  {
1805  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 1>.");
1806  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1807  }
1808 
1836  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1838  {
1839  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1840  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1841  }
1842 
1867  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1869  {
1870  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 2>.");
1871  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1872  }
1873 
1904  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1906  {
1907  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1908  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1909  }
1910 
// Rank-3 variant: construct on _Av and fill from a raw host buffer.
1938  texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1939  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1940  {
1941  static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 3>.");
1942  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1943  }
1944 
// Rank-3 variant: construct on _Av (with associated view) and fill from a
// raw host buffer.
1978  texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1979  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
1980  {
1981  static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
1982  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1983  }
1984 
1992  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
1993  {
1994  _Initialize(_Src.accelerator_view, _Src);
1995  }
1996 
2004  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2005  {
2006  _Initialize(_Src.accelerator_view, _Src);
2007  }
2008 
2019  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2020  {
2021  _Initialize(_Acc_view, _Src);
2022  }
2023 
2034  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2035  {
2036  _Initialize(_Acc_view, _Src);
2037  }
2038 
2053  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2054  {
2055  _Initialize(_Acc_view, _Associated_av, _Src);
2056  }
2057 
2072  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2073  {
2074  _Initialize(_Acc_view, _Associated_av, _Src);
2075  }
2076 
// Copy constructor: deep-copies _Src (same extent and mipmap levels) onto
// _Src's own accelerator_view / associated_accelerator_view pair.
2083  texture(const texture & _Src)
2084  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2085  {
2086  _Initialize(_Src.accelerator_view, _Src.associated_accelerator_view, _Src);
2087  }
2088 
// Move constructor: delegates to the move-assignment operator, which steals
// _Other's descriptor and nulls out the source.
2095  texture(texture && _Other)
2096  {
2097  *this = std::move(_Other);
2098  }
2099 
// Copy _Src onto a different accelerator view _Av (extent and mipmap levels
// are preserved).
2109  texture(const texture & _Src, const Concurrency::accelerator_view & _Av)
2110  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2111  {
2112  _Initialize(_Av, _Src)
2113  }
2114 
// Copy _Src onto accelerator view _Av with an explicit associated view.
2128  texture(const texture & _Src, const Concurrency::accelerator_view & _Av, const Concurrency::accelerator_view& _Associated_av)
2129  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2130  {
2131  _Initialize(_Av, _Associated_av, _Src);
2132  }
2133 
2143  texture& operator=(const texture & _Other)
2144  {
2145  if (this != &_Other)
2146  {
2147  this->_M_extent = _Other._M_extent;
2149  _Initialize(_Other.accelerator_view, _Other.associated_accelerator_view, _Other);
2150  }
2151  return *this;
2152  }
2153 
2164  {
2165  if (this != &_Other)
2166  {
2167  this->_M_extent = _Other._M_extent;
2168  this->_M_texture_descriptor = _Other._M_texture_descriptor;
2169 
2170  _Other._M_texture_descriptor._M_data_ptr = NULL;
2171  _Other._M_texture_descriptor._Set_texture_ptr(NULL);
2172  }
2173  return *this;
2174  }
2175 
2182  void copy_to(texture & _Dest) const
2183  {
2184  if (this->extent != _Dest.extent)
2185  {
2186  throw runtime_exception("The source and destination textures must have the exactly the same extent.", E_INVALIDARG);
2187  }
2188 
2191  this->get_data_length());
2192 
2193  this->_Copy_to(_Dest);
2194 
2196  }
2197 
2204 #pragma warning( push )
2205 #pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
2207  {
2208  if (this->extent != _Dest.extent)
2209  {
2210  throw runtime_exception("The source and destination textures must have the exactly the same extent.", E_INVALIDARG);
2211  }
2212 
2215  this->get_data_length());
2216 
2217  this->_Copy_to(_Dest);
2218 
2220  }
2221 
// Destructor: no explicit work here; the member/base destructors are
// responsible for releasing the underlying texture resources.
2225  ~texture() __CPU_ONLY
2226  {
2227  }
2228 
2238  const _Value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
2239  {
2241  _Texture_read_helper<index<_Rank>, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
2242  return _Tmp;
2243  }
2244 
2254  const _Value_type operator[] (int _I0) const __GPU_ONLY
2255  {
2256  static_assert(_Rank == 1, "value_type texture::operator[](int) is only permissible on texture<value_type, 1>.");
2257  return (*this)[index<1>(_I0)];
2258  }
2259 
2269  const _Value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
2270  {
2271  return (*this)[_Index];
2272  }
2273 
2283  const _Value_type operator() (int _I0) const __GPU_ONLY
2284  {
2285  static_assert(_Rank == 1, "value_type texture::operator()(int) is only permissible on texture<value_type, 1>.");
2286  return (*this)[index<1>(_I0)];
2287  }
2288 
2301  const _Value_type operator() (int _I0, int _I1) const __GPU_ONLY
2302  {
2303  static_assert(_Rank == 2, "value_type texture::operator()(int, int) is only permissible on texture<value_type, 2>.");
2304  return (*this)[index<2>(_I0, _I1)];
2305  }
2306 
2322  const _Value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
2323  {
2324  static_assert(_Rank == 3, "value_type texture::operator()(int, int, int) is only permissible on texture<value_type, 3>.");
2325  return (*this)[index<3>(_I0, _I1, _I2)];
2326  }
2327 
2337  const _Value_type get(const index<_Rank>& _Index) const __GPU_ONLY
2338  {
2339  return (*this)[_Index];
2340  }
2341 
2351  void set(const index<_Rank>& _Index, const _Value_type& _Value) __GPU_ONLY
2352  {
2353  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for set method.");
2354  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for set method.");
2355  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for set method.");
2357  }
2358 
2362  _Ret_ void* data() __CPU_ONLY
2363  {
2364  return this->_Get_texture()->_Get_host_ptr();
2365  }
2366 
2370  const void* data() const __CPU_ONLY
2371  {
2372  return this->_Get_texture()->_Get_host_ptr();
2373  }
2374 
2379  __declspec(property(get=get_row_pitch)) unsigned int row_pitch;
2380  unsigned int get_row_pitch() const __CPU_ONLY
2381  {
2382  static_assert(_Rank >= 2, "row_pitch is only applicable to staging textures with rank 2 or higher.");
2383 
2384  if (!this->_Get_texture()->_Is_staging()) {
2385  throw runtime_exception("row_pitch is only applicable to staging textures.", E_INVALIDARG);
2386  }
2387 
2388  return static_cast<unsigned int>(this->_Get_texture()->_Get_row_pitch());
2389  }
2390 
2395  __declspec(property(get=get_depth_pitch)) unsigned int depth_pitch;
2396  unsigned int get_depth_pitch() const __CPU_ONLY
2397  {
2398  static_assert(_Rank == 3, "depth_pitch is only applicable to staging textures with rank 3.");
2399 
2400  if (!this->_Get_texture()->_Is_staging()) {
2401  throw runtime_exception("depth_pitch is only applicable to staging textures.", E_INVALIDARG);
2402  }
2403 
2404  return static_cast<unsigned int>(this->_Get_texture()->_Get_depth_pitch());
2405  }
2406 
2410  __declspec(property(get=get_associated_accelerator_view)) Concurrency::accelerator_view associated_accelerator_view;
2412  {
2413  return this->_Get_texture()->_Get_accelerator_view();
2414  }
2415 
2416 private:
2417  // Private constructor used by make_texture to create a texture from D3D texture
// Wraps an existing _Texture_descriptor (e.g. one created from a D3D
// resource) without allocating; used by the make_texture interop path.
2418  texture(const Concurrency::extent<_Rank> & _Ext, const _Texture_descriptor & _Descriptor)
2419  : details::_Texture_base<_Value_type, _Rank>(_Ext, _Descriptor)
2420  {
2421  }
2422 
2424  {
2425  return (_Is_cpu_accelerator(_Av.accelerator) && !_Is_cpu_accelerator(_Associated_av.accelerator));
2426  }
2427 
2428  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
2429  {
2430  if (_Bits_per_scalar_element != 8 && _Bits_per_scalar_element != 16 &&
2431  _Bits_per_scalar_element != 32 && _Bits_per_scalar_element != 64)
2432  {
2433  throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 8, 16, 32, or 64.", E_INVALIDARG);
2434  }
2435 
2436  // special cases for 64 and for double based textures
2437 
2438 #pragma warning( push )
2439 #pragma warning( disable : 4127 ) // conditional expression is constant
2440  if (_Bits_per_scalar_element == 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type)
2441  {
2442  throw runtime_exception("Invalid _Bits_per_scalar_element argument - 64 is only valid for texture of double based short vector types.", E_INVALIDARG);
2443  }
2444 
2445  if (_Bits_per_scalar_element != 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type)
2446  {
2447  throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 64 for texture of double based short vector types.", E_INVALIDARG);
2448  }
2449 
2451 
2452  // the rest of the check is done by _Texture::_Create_texture, it depends on the underlying supported DXGI formats.
2453 
2454  unsigned int _Bits_per_channel = _Bits_per_scalar_element;
2455 
2457  {
2459  }
2460 
2461  std::array<size_t, 3> _Dimensions = Concurrency::graphics::details::_Get_dimensions(this->_M_extent, /*_Mip_offset=*/0);
2462 
2463  // release the old texture first before allocating new one to avoid the chance on hitting OOM
2465  _Texture_ptr _Tex_ptr = NULL;
2466 
2467  // See if we need to allocate a staging texture
2468  if (_Should_create_staging_texture(_Av, _Associated_av)) {
2469 
2471  {
2472  throw runtime_exception("Creating staging textures with mipmap levels > 1 is not supported", E_INVALIDARG);
2473  }
2474 
2475  _Tex_ptr = _Texture::_Create_stage_texture(
2476  _Associated_av, _Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], this->_M_texture_descriptor._Get_view_mipmap_levels(),
2479  _Bits_per_channel);
2480 
2481  // Now map the texture
2482  _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
2483  }
2484  else {
2485  _Tex_ptr = _Texture::_Create_texture(_Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], this->_M_texture_descriptor._Get_view_mipmap_levels(),
2488  _Bits_per_channel);
2489  }
2490 
2491  this->_M_texture_descriptor._Set_texture_ptr(_Tex_ptr);
2492 #pragma warning( pop )
2493  }
2494 
// Convenience overload: uses _Av as both the target and the associated
// accelerator view (no staging texture will be created).
2495  void _Initialize(const Concurrency::accelerator_view& _Av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
2496  {
2497  _Initialize(_Av, _Av, _Bits_per_scalar_element);
2498  }
2499 
// Allocates using the value type's default bits-per-scalar-element as
// determined by _Get_default_bits_per_scalar_element<_Value_type>().
2500  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
2501  {
2502  _Initialize(_Av, _Associated_av, Concurrency::graphics::details::_Get_default_bits_per_scalar_element<_Value_type>());
2503  }
2504 
// Simplest overload: default bit depth, _Av doubles as the associated view.
2505  void _Initialize(const Concurrency::accelerator_view& _Av) __CPU_ONLY
2506  {
2507  _Initialize(_Av, _Av);
2508  }
2509 
2510  template<typename _Input_iterator>
2511  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
2512  {
2513  _Initialize(_Av, _Associated_av);
2514 
2517  this->get_data_length());
2518 
2519  Concurrency::graphics::details::_Copy_async_impl(_Src_first, _Src_last, *this, index<_Rank>(), this->extent)._Get();
2520 
2522  }
2523 
// Iterator-range overload without a distinct associated view: _Av is used
// for both roles.
2524  template<typename _Input_iterator>
2525  void _Initialize(const Concurrency::accelerator_view& _Av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
2526  {
2527  _Initialize(_Av, _Av, _Src_first, _Src_last);
2528  }
2529 
// Allocate with an explicit bit depth, then copy _Src_byte_size bytes from
// the raw host buffer _Source into the new texture.
2530  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
2531  {
2532  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
2533  Concurrency::graphics::copy(_Source, _Src_byte_size, *this);
2534  }
2535 
// Raw-buffer overload without a distinct associated view.
2536  void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
2537  {
2538  _Initialize(_Av, _Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
2539  }
2540 
// Allocate with the value type's default bit depth, then copy
// _Src_byte_size bytes from the raw host buffer _Source.
2541  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY
2542  {
2543  _Initialize(_Av, _Associated_av);
2544  Concurrency::graphics::copy(_Source, _Src_byte_size, *this);
2545  }
2546 
// Default-bit-depth raw-buffer overload without a distinct associated view.
2547  void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY
2548  {
2549  _Initialize(_Av, _Av, _Source, _Src_byte_size);
2550  }
2551 
2553  {
2554  if (_Src.bits_per_scalar_element != 0) // _Src is not created via interop
2555  {
2556  _Initialize(_Av, _Associated_av, _Src.bits_per_scalar_element);
2557  }
2558  else // _Src is created via interop, create a new texture with the same properties as the existing one.
2559  {
2560  _Texture_ptr _New_tex;
2561  if (_Should_create_staging_texture(_Av, _Associated_av))
2562  {
2563  _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Associated_av, _Av);
2564  }
2565  else
2566  {
2567  _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Av, _Associated_av);
2568  }
2569  this->_M_texture_descriptor._Set_texture_ptr(_New_tex);
2570  }
2571 
2574  this->get_data_length());
2575 
2577 
2579  }
2580 
2582  {
2583  _Initialize(_Av, _Av, _Src);
2584  }
2585 };
2586 
2596 #pragma warning( push )
2597 #pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
2598 template <typename _Value_type, int _Rank> class __declspec(deprecated("writeonly_texture_view is deprecated. Please use texture_view instead.")) writeonly_texture_view : public details::_Texture_base<_Value_type, _Rank>
2599 {
2600  static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for writeonly_texture_view.");
2601 
2602 public:
2610  : _Texture_base(_Src, /*_Most_detailed_mipmap_level=*/0, /*_View_mipmap_levels=*/1)
2611  {
2612  _Texture* _Tex = this->_Get_texture();
2613  if ((_Tex->_Get_num_channels() == 3) && (_Tex->_Get_bits_per_channel() == 32)) {
2614  throw runtime_exception("writeonly_texture_view cannot be created from a 3-channel texture with 32 bits per scalar element.", E_INVALIDARG);
2615  }
2616  if (_Tex->_Is_staging()) {
2617  throw runtime_exception("writeonly_texture_view cannot be created from a staging texture object.", E_INVALIDARG);
2618  }
2619  }
2620 
2628  : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true)
2629  {
2630  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for the constructor.");
2631  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for the constructor.");
2632  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for the constructor.");
2633  }
2634 
2642  : _Texture_base(_Src)
2643  {
2644  }
2645 
2653  {
2654  if (this != &_Other)
2655  {
2656  this->_M_extent = _Other._M_extent;
2657  this->_M_texture_descriptor = _Other._M_texture_descriptor;
2658  }
2659  return *this;
2660  }
2661 
2665  ~writeonly_texture_view() __GPU
2666  {
2667  }
2668 
2678  void set(const index<_Rank>& _Index, const _Value_type& _Value) const __GPU_ONLY
2679  {
2680  _Texture_write_helper<index<_Rank>, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Value, _Index);
2681  }
2682 };
2683 #pragma warning( pop )
2684 
2696 template <typename _Value_type, int _Rank> class texture_view : public details::_Texture_base<_Value_type, _Rank>
2697 {
2698  friend class texture_view<const _Value_type, _Rank>;
2699 
2700 public:
2711  texture_view(texture<_Value_type, _Rank>& _Src, unsigned int _Mipmap_level = 0) __CPU_ONLY
2712  : _Texture_base(_Src, _Mipmap_level, /*_View_mipmap_levels=*/1)
2713  {
2714  if (this->_Get_texture()->_Is_staging()) {
2715  throw runtime_exception("texture_view cannot be created from a staging texture object.", E_INVALIDARG);
2716  }
2717  }
2718 
2726  : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true)
2727  {
2728  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "writable texture_view can only be created from a single-component texture on an accelerator.");
2729  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "writable texture_view cannot be created from a unorm texture on an accelerator.");
2730  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "writable texture_view cannot be created from a norm texture on an accelerator.");
2731  }
2732 
2740  : _Texture_base(_Other)
2741  {
2742  }
2743 
2751  {
2752  if (this != &_Other)
2753  {
2754  this->_M_extent = _Other._M_extent;
2755  this->_M_texture_descriptor = _Other._M_texture_descriptor;
2756  }
2757  return *this;
2758  }
2759 
2763  ~texture_view() __GPU
2764  {
2765  }
2766 
2776  const _Value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
2777  {
2778  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Read is only permissible on single-component writable texture_view.");
2779  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Read is not permissible on a writable unorm texture_view.");
2780  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Read is not permissible on a writable norm texture_view.");
2781 
2782  _Value_type _Tmp;
2783  _Texture_read_helper<index<_Rank>, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
2784  return _Tmp;
2785  }
2786 
2796  const _Value_type operator[] (int _I0) const __GPU_ONLY
2797  {
2798  static_assert(_Rank == 1, "const value_type operator[](int) is only permissible on texture_view<value_type, 1>.");
2799  return (*this)[index<1>(_I0)];
2800  }
2801 
2811  const _Value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
2812  {
2813  return (*this)[_Index];
2814  }
2815 
2825  const _Value_type operator() (int _I0) const __GPU_ONLY
2826  {
2827  static_assert(_Rank == 1, "const value_type operator()(int) is only permissible on texture_view<value_type, 1>.");
2828  return (*this)[index<1>(_I0)];
2829  }
2830 
2843  const _Value_type operator() (int _I0, int _I1) const __GPU_ONLY
2844  {
2845  static_assert(_Rank == 2, "const value_type operator()(int, int) is only permissible on texture_view<value_type, 2>.");
2846  return (*this)[index<2>(_I0, _I1)];
2847  }
2848 
2864  const _Value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
2865  {
2866  static_assert(_Rank == 3, "const value_type operator()(int, int, int) is only permissible on texture_view<value_type, 3>.");
2867  return (*this)[index<3>(_I0, _I1, _I2)];
2868  }
2869 
2879  const _Value_type get(const index<_Rank>& _Index) const __GPU_ONLY
2880  {
2881  return (*this)[_Index];
2882  }
2883 
2893  void set(const index<_Rank>& _Index, const _Value_type& _Value) const __GPU_ONLY
2894  {
2896  }
2897 };
2898 
2903 {
2906  filter_unknown = 0xFFFFFFFF,
2907 };
2908 
2913 {
2918  address_unknown = 0xFFFFFFFF,
2919 };
2920 
// A CPU-constructed sampler configuration (filter mode, address mode, border color)
// consumed by texture_view sampling methods on the accelerator.
// NOTE(review): this source was recovered from a documentation extract; gaps in the
// embedded line numbering mark lines the extractor elided. Each gap is annotated below.
2924 class sampler
2925 {
2926  friend sampler direct3d::make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY;
// (original line 2927 elided - presumably the matching direct3d::get_sampler friend - TODO confirm)
2928 
2929  template <typename _Value_type, int _Rank>
2930  friend class texture_view;
2931 
2932 public:
// Default constructor; border color is all zeros. (Original lines 2938-2939, the
// start of the initializer list with the default filter/address modes, were elided.)
2937  sampler() __CPU_ONLY
2940  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
2941  {
2942  _Initialize();
2943  }
2944 
// Constructs a sampler with an explicit filter mode. (Original line 2954, presumably
// the _M_address_mode initializer, was elided.)
2952  sampler(filter_mode _Filter_mode)__CPU_ONLY
2953  : _M_filter_mode(_Filter_mode),
2955  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
2956  {
2957  _Initialize();
2958  }
2959 
// Constructs a sampler with an explicit address mode and optional border color.
// (Original line 2971, the start of the initializer list, was elided.)
2970  sampler(address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
2972  _M_address_mode(_Address_mode),
2973  _M_border_color(_Border_color)
2974  {
2975  _Initialize();
2976  }
2977 
// Constructs a sampler with explicit filter mode, address mode and border color.
2991  sampler(filter_mode _Filter_mode, address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
2992  : _M_filter_mode(_Filter_mode),
2993  _M_address_mode(_Address_mode),
2994  _M_border_color(_Border_color)
2995  {
2996  _Initialize();
2997  }
2998 
// Copy constructor: shares the underlying sampler descriptor; does NOT call _Initialize().
3005  sampler(const sampler& _Other) __GPU
3006  : _M_filter_mode(_Other._M_filter_mode),
3007  _M_address_mode(_Other._M_address_mode),
3008  _M_border_color(_Other._M_border_color),
3009  _M_sampler_descriptor(_Other._M_sampler_descriptor)
3010  {
3011  }
3012 
// Move constructor: takes over the descriptor and nulls out the source's descriptor.
// (Original lines 3021-3023 of the initializer list were elided.)
3019  sampler(sampler &&_Other) __GPU
3020  : _M_filter_mode(_Other._M_filter_mode),
3024  {
3025  _Other._M_sampler_descriptor._M_data_ptr = NULL;
3026  _Other._M_sampler_descriptor._Set_sampler_ptr(NULL);
3027  }
3028 
// Copy assignment; self-assignment safe.
3038  sampler& operator=(const sampler& _Other) __GPU
3039  {
3040  if (this != &_Other)
3041  {
3042  _M_filter_mode = _Other._M_filter_mode;
3043  _M_address_mode = _Other._M_address_mode;
3044  _M_border_color = _Other._M_border_color;
3045  _M_sampler_descriptor = _Other._M_sampler_descriptor;
3046  }
3047  return *this;
3048  }
3049 
// Move assignment: copies state, then nulls out the source's descriptor pointers.
3059  sampler& operator=(sampler&& _Other) __GPU
3060  {
3061  if (this != &_Other)
3062  {
3063  _M_filter_mode = _Other._M_filter_mode;
3064  _M_address_mode = _Other._M_address_mode;
3065  _M_border_color = _Other._M_border_color;
3066  _M_sampler_descriptor = _Other._M_sampler_descriptor;
3067  _Other._M_sampler_descriptor._M_data_ptr = NULL;
3068  _Other._M_sampler_descriptor._Set_sampler_ptr(NULL);
3069  }
3070  return *this;
3071  }
3072 
// Read-only properties exposing the sampler configuration.
3076  __declspec(property(get=get_filter_mode)) Concurrency::graphics::filter_mode filter_mode;
3077  Concurrency::graphics::filter_mode get_filter_mode() const __GPU
3078  {
3079  return _M_filter_mode;
3080  }
3081 
// (the __declspec(property) declaration for address_mode appears to have been elided
// around original lines 3082-3085 - TODO confirm)
3086  Concurrency::graphics::address_mode get_address_mode() const __GPU
3087  {
3088  return _M_address_mode;
3089  }
3090 
3094  __declspec(property(get=get_border_color)) Concurrency::graphics::float_4 border_color;
3095  Concurrency::graphics::float_4 get_border_color() const __GPU
3096  {
3097  return _M_border_color;
3098  }
3099 
3100 private:
3101  // internal storage abstraction
// (original line 3102 elided - presumably a typedef for the descriptor type - TODO confirm)
3103 
3104  // a private constructor to be used for constructing a sampler via interop.
3105  sampler(const _Sampler_descriptor & _Descriptor) __CPU_ONLY
3106  : _M_sampler_descriptor(_Descriptor),
// (original lines 3107-3108 of the initializer list were elided)
3109  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
3110  {
3111  // Although we could query border value from the adopted sampler, but it's not that useful
3112  // given that this is the only thing that we could query and when the address mode is not
3113  // address_border, border value is not relevant.
3114  }
3115 
// (original line 3118, the return statement, was elided)
3116  _Ret_ _Sampler* _Get_sampler_ptr() const __CPU_ONLY
3117  {
3119  }
3120 
// Validates the configuration and creates the underlying _Sampler object.
// (Original lines 3124-3126, the validation condition, and 3132-3133, the tail of the
// _Sampler::_Create call, were elided.)
3121  void _Initialize() __CPU_ONLY
3122  {
3123  // Check if the given filter_mode and address_mode are valid C++ AMP ones
3127  {
3128  throw runtime_exception("Invalid sampler configuration", E_INVALIDARG);
3129  }
3130 
3131  _Sampler_ptr samplerPtr = _Sampler::_Create(_M_filter_mode, _M_address_mode,
3134  }
3135 
// Accessor used by texture_view sampling/gather methods on the accelerator.
3136  const _Sampler_descriptor & _Get_descriptor() const __GPU_ONLY
3137  {
3138  return _M_sampler_descriptor;
3139  }
3140 
// (original lines 3142-3144 elided - presumably the _M_filter_mode, _M_address_mode
// and _M_border_color data members - TODO confirm)
3141  _Sampler_descriptor _M_sampler_descriptor;
3145 };
3146 
// Read-only view over a texture: element reads, hardware sampling and 4-texel gather
// operations, but no writes.
// NOTE(review): recovered from a documentation extract; gaps in the embedded line
// numbering mark elided lines (several constructor/operator signatures and the
// coordinates_type/gather_return_type typedefs are not visible here).
3156 template <typename _Value_type, int _Rank> class texture_view<const _Value_type, _Rank> : public details::_Texture_base<_Value_type, _Rank>
3157 {
3158 public:
3159  typedef const _Value_type value_type;
// (original lines 3160-3161 elided - presumably the coordinates_type and
// gather_return_type typedefs used throughout this class - TODO confirm)
3162 
// (constructor signature elided - judging by the body, a GPU-restricted conversion
// from a writable view; single-component value types are rejected at compile time)
3170  : _Texture_base(_Src)
3171  {
3172  // only on the gpu it is not allowed
3173  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels != 1, "Read-only texture_view cannot be created from single-component textures on an accelerator.");
3174  }
3175 
// (constructor signature elided - judging by the body, a CPU-only constructor from a
// texture; staging textures are rejected at run time)
3183  : _Texture_base(_Src)
3184  {
3185  if (this->_Get_texture()->_Is_staging()) {
3186  throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG);
3187  }
3188  }
3189 
// Constructs a read-only view over a mipmap range of _Src; staging textures rejected.
3202  texture_view(const texture<_Value_type, _Rank>& _Src, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
3203  : _Texture_base(_Src, _Most_detailed_mip, _Mip_levels)
3204  {
3205  if (this->_Get_texture()->_Is_staging()) {
3206  throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG);
3207  }
3208  }
3209 
// (two copy-constructor signatures elided on the lines preceding 3217 and 3228;
// both simply forward _Other to the _Texture_base base)
3217  : _Texture_base(_Other)
3218  {
3219  }
3220 
3228  : _Texture_base(_Other)
3229  {
3230  }
3231 
// Re-views another read-only view over a different mipmap range.
3246  texture_view(const texture_view<const _Value_type, _Rank>& _Other, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
3247  : _Texture_base(_Other, _Most_detailed_mip, _Mip_levels)
3248  {
3249  }
3250 
// (two assignment-operator signatures elided on the lines preceding 3258 and 3275;
// both copy the extent and the texture descriptor; the first is self-assignment safe)
3258  {
3259  if (this != &_Other)
3260  {
3261  this->_M_extent = _Other._M_extent;
3262  this->_M_texture_descriptor = _Other._M_texture_descriptor;
3263  }
3264  return *this;
3265  }
3266 
3275  {
3276  this->_M_extent = _Other._M_extent;
3277  this->_M_texture_descriptor = _Other._M_texture_descriptor;
3278  return *this;
3279  }
3280 
// Destructor: the view does not own the underlying texture; nothing to release.
3284  ~texture_view() __GPU
3285  {
3286  }
3287 
// Reads the texel at _Index from the view's most detailed mip level.
3297  const _Value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
3298  {
3299  _Value_type _Tmp;
3300  _Texture_read_helper<index<_Rank>, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
3301  return _Tmp;
3302  }
3303 
// 1-D convenience overload of operator[].
3313  const _Value_type operator[] (int _I0) const __GPU_ONLY
3314  {
3315  static_assert(_Rank == 1, "value_type operator[](int) is only permissible on texture_view<value_type, 1>.");
3316  return (*this)[index<1>(_I0)];
3317  }
3318 
// Function-call syntax equivalents of operator[].
3328  const _Value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
3329  {
3330  return (*this)[_Index];
3331  }
3332 
3342  const _Value_type operator() (int _I0) const __GPU_ONLY
3343  {
3344  static_assert(_Rank == 1, "value_type texture_view::operator()(int) is only permissible on texture_view<value_type, 1>.");
3345  return (*this)[index<1>(_I0)];
3346  }
3347 
3360  const _Value_type operator() (int _I0, int _I1) const __GPU_ONLY
3361  {
3362  static_assert(_Rank == 2, "value_type texture_view::operator()(int, int) is only permissible on texture_view<value_type, 2>.");
3363  return (*this)[index<2>(_I0, _I1)];
3364  }
3365 
3381  const _Value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
3382  {
3383  static_assert(_Rank == 3, "value_type texture_view::operator()(int, int, int) is only permissible on texture_view<value_type, 3>.");
3384  return (*this)[index<3>(_I0, _I1, _I2)];
3385  }
3386 
// Reads the texel at _Index from an explicit mip level of the view.
3400  const _Value_type get(const index<_Rank>& _Index, unsigned int _Mip_level = 0) const __GPU_ONLY
3401  {
3402  _Value_type _Tmp;
3403  _Texture_read_helper<index<_Rank>, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, _Mip_level);
3404  return _Tmp;
3405  }
3406 
// Samples the texture at _Coord with a user-supplied sampler. Disallowed at compile
// time for uint/int/double component types.
3423  const _Value_type sample(const sampler& _Sampler, const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY
3424  {
3425  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type.");
3426  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type.");
3427  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type.");
3428 
3429  _Value_type _Tmp;
3430  _Texture_sample_helper<coordinates_type, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, 4 /*Sampling*/, _Level_of_detail);
3431  return _Tmp;
3432  }
3433 
// Samples with a compile-time predefined sampler: filter/address modes are template
// arguments, validated by static_assert, and encoded into a predefined sampler id.
3453  template<filter_mode _Filter_mode = filter_linear, address_mode _Address_mode = address_clamp>
3454  const _Value_type sample(const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY
3455  {
3456  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type.");
3457  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type.");
3458  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type.");
3459  static_assert((_Filter_mode == filter_point || _Filter_mode == filter_linear), "Invalid filter mode for sample method.");
3460  static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border),
3461  "Invalid address mode for sample method.");
3462 
3463  _Value_type _Tmp;
3464  // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution!
3465  _Texture_predefined_sample_helper<coordinates_type, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, _Filter_mode << 16 |_Address_mode, 4 /*Sampling*/, _Level_of_detail);
3466  return _Tmp;
3467  }
3468 
// gather_red/green/blue/alpha: fetch one component from each of the four texels that
// bilinear sampling at _Coord would touch (rank-2 views only; see _Gather below).
// A channel may only be gathered if value_type has that many components.
3481  const gather_return_type gather_red(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3482  {
3483  return _Gather(_Sampler, _Coord, 0);
3484  }
3485 
3498  const gather_return_type gather_green(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3499  {
3500  static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type.");
3501 
3502  return _Gather(_Sampler, _Coord, 1);
3503  }
3504 
3517  const gather_return_type gather_blue(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3518  {
3519  static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type.");
3520 
3521  return _Gather(_Sampler, _Coord, 2);
3522  }
3523 
3536  const gather_return_type gather_alpha(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3537  {
3538  static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type.");
3539 
3540  return _Gather(_Sampler, _Coord, 3);
3541  }
3542 
// Predefined-sampler variants of the four gather methods (address mode as a
// template argument; gather always uses point filtering internally).
3555  template<address_mode _Address_mode = address_clamp>
3556  const gather_return_type gather_red(const coordinates_type& _Coord) const __GPU_ONLY
3557  {
3558  return _Gather<_Address_mode>(_Coord, 0);
3559  }
3560 
3573  template<address_mode _Address_mode = address_clamp>
3574  const gather_return_type gather_green(const coordinates_type& _Coord) const __GPU_ONLY
3575  {
3576  static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type.");
3577 
3578  return _Gather<_Address_mode>(_Coord, 1);
3579  }
3580 
3593  template<address_mode _Address_mode = address_clamp>
3594  const gather_return_type gather_blue(const coordinates_type& _Coord) const __GPU_ONLY
3595  {
3596  static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type.");
3597 
3598  return _Gather<_Address_mode>(_Coord, 2);
3599  }
3600 
3613  template<address_mode _Address_mode = address_clamp>
3614  const gather_return_type gather_alpha(const coordinates_type& _Coord) const __GPU_ONLY
3615  {
3616  static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type.");
3617 
3618  return _Gather<_Address_mode>(_Coord, 3);
3619  }
3620 
3621 private:
// Common gather implementation with a user-supplied sampler. _Component selects the
// channel (0=r, 1=g, 2=b, 3=a); level of detail is fixed at 0.
3622  const gather_return_type _Gather(const sampler& _Sampler, const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY
3623  {
3624  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type.");
3625  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type.");
3626  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type.");
3627  static_assert(_Rank == 2, "gather methods are only permissible on texture_view<value_type, 2>.");
3628 
3629  gather_return_type _Tmp;
3630  _Texture_sample_helper<coordinates_type, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, _Component, /*_Level_of_detail=*/0.0f);
3631  return _Tmp;
3632  }
3633 
// Common gather implementation with a compile-time predefined sampler.
3634  template<address_mode _Address_mode>
3635  const gather_return_type _Gather(const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY
3636  {
3637  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type.");
3638  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type.");
3639  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type.");
3640  static_assert(_Rank == 2, "gather methods are only permissible on texture_view<value_type, 2>.");
3641  static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border),
3642  "Invalid address mode for gather methods.");
3643 
3644  gather_return_type _Tmp;
3645  // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution!
3646  // gather only used the address_mode of the sampler, internally we use filter_point so that the predefined sampler id scheme is same for both sample and gather.
3647  _Texture_predefined_sample_helper<coordinates_type, _Rank>::func(this->_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, filter_point << 16 |_Address_mode, _Component, /*_Level_of_detail=*/0.0f);
3648  return _Tmp;
3649  }
3650 };
3651 
3652 namespace details
3653 {
3654 
3655 template <typename T>
// texture_traits primary template: by default a type is neither a texture nor
// writable; the specializations below opt specific texture types in.
// (NOTE(review): the "struct texture_traits" declaration line, original 3656, was
// elided by the documentation extractor - confirm against the shipped header.)
3657 {
3658  static const bool is_texture = false;
3659  static const bool is_writable = false;
3660 };
3661 
3662 template <typename _Value_type, int _Rank>
3663 struct texture_traits<texture<_Value_type, _Rank>>
3664 {
3665  static const bool is_texture = true;
3666  static const bool is_writable = true;
3667 };
3668 
3669 template <typename _Value_type, int _Rank>
3670 struct texture_traits<const texture<_Value_type, _Rank>>
3671 {
3672  static const bool is_texture = true;
3673  static const bool is_writable = false;
3674 };
3675 
3676 template <typename _Value_type, int _Rank>
3677 struct texture_traits<writeonly_texture_view<_Value_type, _Rank>>
3678 {
3679  static const bool is_texture = true;
3680  static const bool is_writable = true;
3681 };
3682 
3683 template <typename _Value_type, int _Rank>
3684 struct texture_traits<const writeonly_texture_view<_Value_type, _Rank>>
3685 {
3686  static const bool is_texture = true;
3687  static const bool is_writable = true;
3688 };
3689 
3690 template <typename _Value_type, int _Rank>
3691 struct texture_traits<texture_view<_Value_type, _Rank>>
3692 {
3693  static const bool is_texture = true;
3694  static const bool is_writable = true;
3695 };
3696 
3697 template <typename _Value_type, int _Rank>
3698 struct texture_traits<texture_view<const _Value_type, _Rank>>
3699 {
3700  static const bool is_texture = true;
3701  static const bool is_writable = false;
3702 };
3703 
3704 template <typename _Value_type, int _Rank>
3705 struct texture_traits<const texture_view<const _Value_type, _Rank>>
3706 {
3707  static const bool is_texture = true;
3708  static const bool is_writable = false;
3709 };
3710 
3711 template <typename _Value_type, int _Rank>
3712 struct texture_traits<const texture_view<_Value_type, _Rank>>
3713 {
3714  static const bool is_texture = true;
3715  static const bool is_writable = true;
3716 };
3717 
3718 // The helper function used by ETW and copy functions to calculate number of bytes for the copy operation given input section
3719 template <typename _Value_type, int _Rank>
// (NOTE(review): the function signature, original line 3720, was elided by the
// documentation extractor; from the body it takes a _Texture_base-derived _Tex and
// an extent<_Rank> _Extent and returns the section's byte count - TODO confirm.)
3721 {
// Resolve the underlying texture and its descriptor (mipmap range) from the view.
3722  _Texture* _Tex_ptr = _Get_texture(_Tex);
3723  _Texture_descriptor _Tex_desc = _Get_texture_descriptor(_Tex);
3724 
// Byte count of _Extent across the view's mipmap levels, starting at its most
// detailed level.
3725  return _Tex_ptr->_Get_data_length(_Tex_desc._Get_most_detailed_mipmap_level(), _Tex_desc._Get_view_mipmap_levels(), _Get_dimensions(_Extent, /*Mip_offset=*/0).data());
3726 }
3727 
// Asynchronously copies the element range [_First, _Last) into a 3-D section of
// texture _Dst (offset/extent given as size_t[3], target mip level
// _Dst_mipmap_level). Returns an _Event that completes when the copy is done.
// Host-accessible destinations are copied synchronously on the host; otherwise the
// data is staged through one or more temporary staging textures in chunks.
3728 template <typename _Input_iterator, typename _Value_type>
3729 _Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last,
3730  _In_ _Texture *_Dst, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level,
3731  const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent = NULL)
3732 {
3733  _ASSERTE(_Dst != nullptr);
3734  _ASSERTE(_Dst_offset != nullptr);
3735  _ASSERTE(_Copy_extent != nullptr);
3736 
// The iterator range must supply at least one element per texel of the section.
3737  _ASSERTE((unsigned int)std::distance(_First, _Last) >= (_Copy_extent[0] * _Copy_extent[1] * _Copy_extent[2]));
3738 
3739  // The copy region should be within the bounds of the destination texture
3740  _ASSERTE((_Dst_offset[0] + _Copy_extent[0]) <= _Dst->_Get_width(_Dst_mipmap_level));
3741  _ASSERTE((_Dst_offset[1] + _Copy_extent[1]) <= _Dst->_Get_height(_Dst_mipmap_level));
3742  _ASSERTE((_Dst_offset[2] + _Copy_extent[2]) <= _Dst->_Get_depth(_Dst_mipmap_level));
3743 
// Iterator copies require _Value_type to match the texel size exactly (byte
// iterators are exempt so raw-memory copies can reuse this path).
3744 #pragma warning( push )
3745 #pragma warning( disable : 4127 ) // conditional expression is constant
3746  if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Dst->_Get_bits_per_element() != (8U * sizeof(_Value_type))))
3747  {
3748  throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG);
3749  }
3750 #pragma warning( pop )
3751 
3752  // If the dest is accessible on the host we can perform the copy entirely on the host
3753  if (_Dst->_Get_host_ptr() != NULL)
3754  {
3755  // We have made sure that the three multiplications below won't cause integer overflow when creating the texture
3756  _ASSERTE(((_Dst->_Get_bits_per_element() * _Copy_extent[0]) % (8U * sizeof(_Value_type))) == 0);
3757 
3758  size_t _Row_size = (_Dst->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes
3759  size_t _Depth_slice_size = _Row_size * _Copy_extent[1];
3760 
3761  size_t _Row_pitch = _Dst->_Get_row_pitch();
3762  size_t _Depth_pitch = _Dst->_Get_depth_pitch();
3763  _ASSERTE(_Row_pitch >= _Row_size);
3764  _ASSERTE(_Depth_pitch >= _Depth_slice_size);
3765 
// Byte offset of the section's origin inside the host-mapped texture memory.
3766  size_t _Dst_offset_in_bytes = ((_Dst_offset[0] * _Dst->_Get_bits_per_element()) >> 3) +
3767  (_Dst_offset[1] * _Row_pitch) + (_Dst_offset[2] * _Depth_pitch);
3768 
3769  unsigned char *_PDest = reinterpret_cast<unsigned char*>(_Dst->_Get_host_ptr()) + _Dst_offset_in_bytes;
3770 
// Row-by-row host copy honoring the texture's row/depth pitches.
3771  _Copy_data_on_host_src_iter(_Dst->_Get_rank(), _First, reinterpret_cast<_Value_type*>(_PDest),
3772  _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2],
3773  _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type));
3774 
// Host copy completed synchronously; return an already-completed event.
3775  return _Event();
3776  }
3777 
3778  // The dest is not accessible on the host; we need to copy src to
3779  // a temporary staging texture and launch a copy from the staging texture
3780  // to the dest texture.
3781  _Event _Ev;
3782 
3783  // Determine the copy chunk extent
3784  std::array<size_t, 3> _Copy_chunk_extent;
3785  if (_Preferred_copy_chunk_extent != NULL)
3786  {
3787  std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin());
3788  }
3789  else
3790  {
3791  _Get_preferred_copy_chunk_extent(_Dst->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Dst->_Get_bits_per_element(), _Copy_chunk_extent.data());
3792  }
3793 
3794  std::array<size_t, 3> _Curr_copy_offset;
3795  std::copy(&_Dst_offset[0], &_Dst_offset[3], _Curr_copy_offset.begin());
3796 
3797  std::array<size_t, 3> _Remaining_copy_extent;
3798  std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin());
3799 
// Chunked copy loop: each iteration stages one chunk and chains a device copy;
// _Truncated_copy indicates more data remains after the current chunk.
3800  bool _Truncated_copy = false;
3801  do
3802  {
3803  _Texture_ptr _Dst_staging_tex_ptr;
3804  std::array<size_t, 3> _Curr_copy_extent;
3805  _Truncated_copy = _Get_chunked_staging_texture(_Dst, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Dst_staging_tex_ptr);
3806 
3807 
3808  // Now copy from the src pointer to the temp staging texture
3809  _Dst_staging_tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
3810 
3811  std::array<size_t, 3> _Dst_staging_tex_offset;
3812  _Dst_staging_tex_offset.fill(0);
// Recursive call takes the host-accessible fast path for the staging texture.
3813  _Event _Temp_ev = _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, _Dst_staging_tex_ptr,
3814  _Dst_staging_tex_offset.data(), /*_Dst_mipmap_level=*/0, _Curr_copy_extent.data(), _Copy_chunk_extent.data());
3815 
3816  // Now chain a copy from the temporary staging texture to the _Dst texture
// The continuation captures smart pointers by value to keep both textures alive.
3817  _Texture_ptr _Dst_tex_ptr = _Dst;
3818  _Temp_ev = _Temp_ev._Add_continuation(std::function<_Event()>([_Dst_staging_tex_ptr, _Dst_tex_ptr, _Curr_copy_extent,
3819  _Dst_staging_tex_offset, _Curr_copy_offset, _Dst_mipmap_level]() mutable -> _Event
3820  {
3821  return _Dst_staging_tex_ptr->_Copy_to_async(_Dst_tex_ptr, _Curr_copy_extent.data(), _Dst_staging_tex_offset.data(), _Curr_copy_offset.data(), /*_Src_mipmap_level=*/0, _Dst_mipmap_level);
3822  }));
3823 
3824  _Ev = _Ev._Add_event(_Temp_ev);
3825 
3826  // Now adjust the _Src and _Dst offsets for the remaining part of the copy
3827  if (_Truncated_copy)
3828  {
3829  // The offset only needs to be adjusted in the most significant dimension
3830  _Curr_copy_offset[_Dst->_Get_rank() - 1] += _Curr_copy_extent[_Dst->_Get_rank() - 1];
3831  std::advance(_First, (((_Curr_copy_extent[0] * _Dst->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]);
3832  }
3833 
3834  } while (_Truncated_copy);
3835 
3836  return _Ev;
3837 }
3838 
// Asynchronously copies a 3-D section of texture _Tex (offset/extent given as
// size_t[3], source mip level _Src_mipmap_level) to the output iterator _First.
// Host-accessible sources are copied synchronously; otherwise the first chunk is
// pulled through a staging texture and any remainder handled by a recursive
// continuation, so device->staging and staging->iterator copies can overlap.
3839 template <typename _Output_iterator, typename _Value_type>
3840 _Event _Copy_async_impl(_Texture *_Tex, const size_t *_Tex_offset, unsigned int _Src_mipmap_level, const size_t *_Copy_extent, _Output_iterator _First, const size_t *_Preferred_copy_chunk_extent = NULL)
3841 {
3842  _ASSERTE(_Tex != nullptr);
3843  _ASSERTE(_Tex_offset != nullptr);
3844  _ASSERTE(_Copy_extent != nullptr);
3845 
3846  // The copy region should be within the bounds of the source texture
3847  _ASSERTE((_Tex_offset[0] + _Copy_extent[0]) <= _Tex->_Get_width(_Src_mipmap_level));
3848  _ASSERTE((_Tex_offset[1] + _Copy_extent[1]) <= _Tex->_Get_height(_Src_mipmap_level));
3849  _ASSERTE((_Tex_offset[2] + _Copy_extent[2]) <= _Tex->_Get_depth(_Src_mipmap_level));
3850 
// Iterator copies require _Value_type to match the texel size exactly (byte
// iterators are exempt so raw-memory copies can reuse this path).
3851 #pragma warning( push )
3852 #pragma warning( disable : 4127 ) // conditional expression is constant
3853  if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Tex->_Get_bits_per_element() != (8U * sizeof(_Value_type))))
3854  {
3855  throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG);
3856  }
3857 #pragma warning( pop )
3858 
3859  // If the texture is available on the host then we can perform the copy entirely on the host
3860  if (_Tex->_Get_host_ptr() != nullptr)
3861  {
3862  // We have made sure that the three multiplications below won't cause integer overflow when creating the texture
3863  _ASSERTE(((_Tex->_Get_bits_per_element() * _Copy_extent[0]) % 8U) == 0);
3864 
3865  size_t _Row_size = (_Tex->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes
3866  size_t _Depth_slice_size = _Row_size * _Copy_extent[1];
3867 
3868  size_t _Row_pitch = _Tex->_Get_row_pitch();
3869  size_t _Depth_pitch = _Tex->_Get_depth_pitch();
3870  _ASSERTE(_Row_pitch >= _Row_size);
3871  _ASSERTE(_Depth_pitch >= _Depth_slice_size);
3872 
// Byte offset of the section's origin inside the host-mapped texture memory.
3873  size_t _Tex_offset_in_bytes = ((_Tex_offset[0] * _Tex->_Get_bits_per_element()) >> 3) +
3874  (_Tex_offset[1] * _Row_pitch) + (_Tex_offset[2] * _Depth_pitch);
3875 
3876  unsigned char *_PTex = reinterpret_cast<unsigned char*>(_Tex->_Get_host_ptr()) + _Tex_offset_in_bytes;
3877 
// Row-by-row host copy honoring the texture's row/depth pitches.
3878  _Copy_data_on_host_dst_iter(_Tex->_Get_rank(), reinterpret_cast<_Value_type*>(_PTex), _First,
3879  _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2],
3880  _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type));
3881 
// Host copy completed synchronously; return an already-completed event.
3882  return _Event();
3883  }
3884 
3885  // The texture is not accessible on the host; we need to copy to/from a staging
3886  // texture before the copy to the destination. This is done in chunks, such that
3887  // we can concurrently copy from the source texture to a staging texture while
3888  // copying from a staging texture from a previous chunk to the destination.
3889  _Event _Ev;
3890 
3891  // Determine the copy chunk extent
3892  std::array<size_t, 3> _Copy_chunk_extent;
3893  if (_Preferred_copy_chunk_extent != nullptr)
3894  {
3895  std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin());
3896  }
3897  else
3898  {
3899  _Get_preferred_copy_chunk_extent(_Tex->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Tex->_Get_bits_per_element(), _Copy_chunk_extent.data());
3900  }
3901 
3902  std::array<size_t, 3> _Curr_copy_offset;
3903  std::copy(&_Tex_offset[0], &_Tex_offset[3], _Curr_copy_offset.begin());
3904 
3905  std::array<size_t, 3> _Remaining_copy_extent;
3906  std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin());
3907 
3908  bool _Truncated_copy = false;
3909 
// Stage the first chunk: device texture -> staging texture.
3910  _Texture_ptr _Staging_tex_ptr;
3911  std::array<size_t, 3> _Curr_copy_extent;
3912  _Truncated_copy = _Get_chunked_staging_texture(_Tex, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Staging_tex_ptr);
3913 
3914  // Now copy into the temp staging texture
3915  std::array<size_t, 3> _Staging_tex_offset;
3916  _Staging_tex_offset.fill(0);
3917  _Event _Temp_ev = _Copy_async_impl(_Tex, _Curr_copy_offset.data(), _Src_mipmap_level,
3918  _Staging_tex_ptr._Get_ptr(), _Staging_tex_offset.data(), /*_Dst_mipmap_level=*/0,
3919  _Curr_copy_extent.data(), _Copy_chunk_extent.data());
3920  _Ev = _Ev._Add_event(_Temp_ev);
3921 
3922  // If we have finished our copy, we just need to add a continuation to copy
3923  // from the temporary staging texture to the _Dst pointer
3924  if (!_Truncated_copy)
3925  {
// The recursive call below takes the host-accessible fast path for the staging texture.
3926  return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr,
3927  _Curr_copy_extent, _Staging_tex_offset, _Copy_chunk_extent, _First]() mutable -> _Event
3928  {
3929  return _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data());
3930  }));
3931  }
3932  else
3933  {
3934  // The copy was truncated. We need to recursively perform the rest of the copy
3935  _Texture_ptr _Tex_ptr = _Tex;
// Advance the offset in the most significant dimension past the staged chunk.
3936  _Curr_copy_offset[_Tex->_Get_rank() - 1] += _Curr_copy_extent[_Tex->_Get_rank() - 1];
3937  return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr, _First, _Curr_copy_extent,
3938  _Staging_tex_offset, _Tex_ptr, _Curr_copy_offset, _Remaining_copy_extent, _Copy_chunk_extent, _Src_mipmap_level]() mutable -> _Event
3939  {
3940  // Initiate copying of the remaining portion
3941  _Output_iterator _New_dst_iter = _First;
3942  _Advance_output_iterator<decltype(_New_dst_iter), size_t>(_New_dst_iter, (((_Curr_copy_extent[0] * _Tex_ptr->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]);
3943  _Event _Ev1 = _Copy_async_impl<_Output_iterator, _Value_type>(_Tex_ptr, _Curr_copy_offset.data(), _Src_mipmap_level, _Remaining_copy_extent.data(), _New_dst_iter, _Copy_chunk_extent.data());
3944 
3945  // Now copy the data from the temp staging buffer to the _Dst pointer
3946  _Event _Ev2 = _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data());
3947 
3948  return _Ev2._Add_event(_Ev1);
3949  }));
3950  }
3951 }
3952 
3953 template <typename _Value_type, int _Rank>
3954 _Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent)
3955 {
3956  _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);
3957 
3958  if (_Dst.get_mipmap_levels() > 1)
3959  {
3960  throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG);
3961  }
3962 
3963  if (_Src_byte_size < _Get_section_size(_Dst, _Copy_extent))
3964  {
3965  if (_Dst.extent == _Copy_extent)
3966  {
3967  throw runtime_exception("Invalid _Src_byte_size argument. _Src_byte_size is smaller than the total size of _Dst.", E_INVALIDARG);
3968  }
3969  else
3970  {
3971  throw runtime_exception("Invalid _Src_byte_size argument. _Src_byte_size is smaller than the provided section of _Dst.", E_INVALIDARG);
3972  }
3973  }
3974 
3975  _Texture *_Dst_tex_ptr = _Get_texture(_Dst);
3976  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
3977  std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);
3978  auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast<const unsigned char*>(_Src));
3979  auto _Last = stdext::make_unchecked_array_iterator(reinterpret_cast<const unsigned char*>(_Src) + _Src_byte_size);
3980 
3981  return _Copy_async_impl<decltype(_First), unsigned char>(_First, _Last, _Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data());
3982 }
3983 
3984 template<typename _Value_type, int _Rank>
3985 _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size)
3986 {
3987  _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);
3988 
3989  if (_Src.get_mipmap_levels() > 1)
3990  {
3991  throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG);
3992  }
3993 
3994  if (_Get_section_size(_Src, _Copy_extent) > _Dst_byte_size)
3995  {
3996  if (_Src.extent == _Copy_extent)
3997  {
3998  throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the size of _Src.", E_INVALIDARG);
3999  }
4000  else
4001  {
4002  throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the provided section of _Src.", E_INVALIDARG);
4003  }
4004  }
4005 
4006  _Texture *_Src_tex_ptr = _Get_texture(_Src);
4007  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
4008  std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);
4009 
4010  auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast<unsigned char*>(_Dst));
4011 
4012  return _Copy_async_impl<decltype(_First), unsigned char>(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _First);
4013 }
4014 
4015 template <typename _Output_iterator, typename _Value_type, int _Rank>
4016 _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Output_iterator _Dest_iter)
4017 {
4018  _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);
4019 
4020  if (_Src.get_mipmap_levels() > 1)
4021  {
4022  throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG);
4023  }
4024 
4025  _Texture *_Src_tex_ptr = _Get_texture(_Src);
4026  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
4027  std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);
4028 
4029  return _Copy_async_impl<_Output_iterator, _Value_type>(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _Dest_iter);
4030 }
4031 
4032 template <typename _Input_iterator, typename _Value_type, int _Rank>
4033 _Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent)
4034 {
4035  _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);
4036  if (static_cast<unsigned int>(std::distance(_First, _Last)) < _Copy_extent.size())
4037  {
4038  throw runtime_exception("Inadequate amount of data supplied through the iterators", E_INVALIDARG);
4039  }
4040 
4041  if (_Dst.get_mipmap_levels() > 1)
4042  {
4043  throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG);
4044  }
4045 
4046  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
4047  std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);
4048 
4049  _Texture *_Dst_tex_ptr = _Get_texture(_Dst);
4050  return _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, _Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data());
4051 }
4052 
// Asynchronously copies a section of one texture to another, mipmap level by
// mipmap level, returning an _Event that aggregates all per-level copies.
// NOTE(review): doxygen line 4054 — the start of this function's signature,
// which presumably declares the _Src and _Src_offset parameters — was dropped
// by the documentation extraction; confirm against the shipped amp_graphics.h.
// The visible lines below are kept byte-identical.
4053 template<typename _Value_type, int _Rank>
4055  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset,
4056  const extent<_Rank> &_Copy_extent)
4057 {
// Both the source and destination sections must be in bounds.
4058  _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);
4059  _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);
4060 
4061  _Texture_descriptor _Src_tex_desc = _Get_texture_descriptor(_Src);
4062  _Texture_descriptor _Dst_tex_desc = _Get_texture_descriptor(_Dst);
4063 
// Levels are copied one-for-one below, so the view level counts must match.
4064  if (_Src_tex_desc._Get_view_mipmap_levels() != _Dst_tex_desc._Get_view_mipmap_levels())
4065  {
4066  throw runtime_exception("The source and destination textures must have the exactly the same number of mipmap levels for texture copy.", E_INVALIDARG);
4067  }
4068 
// Multi-level copies are only supported as whole-texture copies: zero offsets
// and identical extents on both sides.
4069  bool _Is_whole_texture_copy = (_Src_offset == _Dst_offset && _Src_offset == index<_Rank>() && _Src.extent == _Dst.extent && _Src.extent == _Copy_extent);
4070 
4071  if (_Src_tex_desc._Get_view_mipmap_levels() > 1 && !_Is_whole_texture_copy)
4072  {
4073  throw runtime_exception("Sections are not allowed when copy involves multiple mipmap levels", E_INVALIDARG);
4074  }
4075 
// Source and destination views must not alias overlapping levels of one texture.
4076  if (_Src_tex_desc._Are_mipmap_levels_overlapping(&_Dst_tex_desc))
4077  {
4078  throw runtime_exception("The source and destination are overlapping areas on the same texture", E_INVALIDARG);
4079  }
4080 
4081  _Texture* _Src_tex = _Get_texture(_Src);
4082  _Texture* _Dst_tex = _Get_texture(_Dst);
4083 
4084  // Formats must be identical for non-adopted textures. Textures created through D3D interop are not subject to this test
4085  // to allow copy between related, but not identical, formats. Attempting to copy between unrelated formats through interop
4086  // will result in exceptions in debug mode and undefined behavior in release mode.
4087  if (!_Src_tex->_Is_adopted() && !_Dst_tex->_Is_adopted() && (_Src_tex->_Get_texture_format() != _Dst_tex->_Get_texture_format()))
4088  {
4089  throw runtime_exception("The source and destination textures are not compatible.", E_INVALIDARG);
4090  }
4091 
// Offsets are fixed across levels (sections are disallowed for multi-level copies).
4092  std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);
4093  std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);
4094 
4095  _Event _Copy_event;
4096 
4097  unsigned int _Src_most_detailed_mipmap_level = _Src_tex_desc._Get_most_detailed_mipmap_level();
4098  unsigned int _Dst_most_detailed_mipmap_level = _Dst_tex_desc._Get_most_detailed_mipmap_level();
4099 
4100  // Copy all mipmap levels from source to destination one by one.
4101  // Note that the offsets are not allowed therefore only dimensions need to be updated for subsequent mipmap levels
4102  for (unsigned int _Mip_offset = 0; _Mip_offset < _Src_tex_desc._Get_view_mipmap_levels(); ++_Mip_offset)
4103  {
// _Get_dimensions scales the copy extent down for this mip level.
4104  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, _Mip_offset);
4105 
4106  auto _Step_event = _Copy_async_impl(_Src_tex, _Src_offset_arr.data(), _Src_most_detailed_mipmap_level + _Mip_offset,
4107  _Dst_tex, _Dst_offset_arr.data(), _Dst_most_detailed_mipmap_level + _Mip_offset,
4108  _Copy_extent_arr.data());
4109 
// Fold every per-level copy into a single aggregate event for the caller.
4110  _Copy_event = _Copy_event._Add_event(_Step_event);
4111  }
4112 
4113  return _Copy_event;
4114 }
4115 
4116 } // namespace details
4117 
// copy (whole texture -> host buffer): synchronous; blocks until the copy
// completes via ._Get() on the event returned by _Copy_async_impl.
// NOTE(review): the doxygen extraction dropped line 4138 here (and 4144 below).
// The orphaned "nullptr, _Get_section_size(...)" arguments belong to the
// dropped call — presumably an AMP ETW trace begin helper with a matching
// end-of-event call at 4144; confirm against the shipped amp_graphics.h.
4136 template <typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> void copy(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size)
4137 {
4139  nullptr,
4140  _Get_section_size(_Src, _Src.extent));
4141 
4142  details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size)._Get();
4143 
4145 }
4146 
// copy (texture section -> host buffer): synchronous section copy.
// NOTE(review): lines 4173 and 4179 likewise dropped by extraction.
4171 template <typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size)
4172 {
4174  nullptr,
4175  _Get_section_size(_Src, _Copy_extent));
4176 
4177  details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size)._Get();
4178 
4180 }
4181 
4182 
// copy (host buffer -> whole texture): synchronous; destination must be a
// writable texture type (enforced at compile time by the static_assert).
// NOTE(review): lines 4204-4205 and 4210 dropped by extraction.
4201 template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst)
4202 {
4203  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4206  _Get_section_size(_Dst, _Dst.extent));
4207 
4208  details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get();
4209 
4211 }
4212 
// copy (host buffer -> texture section): synchronous section copy into a
// writable texture.
// NOTE(review): lines 4238-4239 and 4244 dropped by extraction.
4234 template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst,
4235  const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
4236 {
4237  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4240  _Get_section_size(_Dst, _Copy_extent));
4241 
4242  details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent)._Get();
4243 
4245 }
4246 
4247 
// copy_async (whole texture -> host buffer): non-blocking variant; starts the
// copy and captures the _Event.
// NOTE(review): extraction dropped line 4271 (the call whose orphaned
// "nullptr, _Get_section_size(...)" arguments remain — presumably an ETW trace
// begin helper) and line 4277, which presumably wraps _Ev into the returned
// concurrency::completion_future; confirm against the shipped amp_graphics.h.
4269 template<typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size)
4270 {
4272  nullptr,
4273  _Get_section_size(_Src, _Src.extent));
4274 
4275  _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size);
4276 
4278 }
4279 
// copy_async (texture section -> host buffer).
// NOTE(review): lines 4307 and 4313 dropped by extraction (see note above).
4304 template<typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent,
4305  _Out_ void * _Dst, unsigned int _Dst_byte_size)
4306 {
4308  nullptr,
4309  _Get_section_size(_Src, _Copy_extent));
4310 
4311  _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size);
4312 
4314 }
4315 
// copy_async (host buffer -> whole texture): destination must be writable.
// NOTE(review): lines 4337-4338 and 4343 dropped by extraction.
4334 template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst)
4335 {
4336  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4339  _Get_section_size(_Dst, _Dst.extent));
4340 
4341  _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent);
4342 
4344 }
4345 
// copy_async (host buffer -> texture section): destination must be writable.
// NOTE(review): lines 4374-4375 and 4380 dropped by extraction.
4370 template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst,
4371  const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
4372 {
4373  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4376  _Get_section_size(_Dst, _Copy_extent));
4377 
4378  _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent);
4379 
4381 }
4382 
// copy (iterator range -> whole texture): synchronous; destination must be a
// writable texture type.
// NOTE(review): extraction dropped lines 4404-4405 and 4410 — presumably the
// ETW trace begin call (whose trailing "_Get_section_size(...)" argument
// remains) and the matching end-of-event call; confirm against the shipped
// amp_graphics.h.
4401 template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst)
4402 {
4403  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4406  _Get_section_size(_Dst, _Dst.extent));
4407 
4408  details::_Copy_async_impl(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get();
4409 
4411 }
4412 
// copy (iterator range -> texture section): synchronous section copy.
// NOTE(review): lines 4440-4441 and 4446 dropped by extraction.
4437 template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
4438 {
4439  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4442  _Get_section_size(_Dst, _Copy_extent));
4443 
4444  details::_Copy_async_impl(_First, _Last, _Dst, _Dst_offset, _Copy_extent)._Get();
4445 
4447 }
4448 
// copy (whole texture -> output iterator): synchronous.
// NOTE(review): lines 4463 and 4469 dropped by extraction; the orphaned
// "nullptr, _Get_section_size(...)" arguments belong to the dropped call.
4461 template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, OutputIterator _Dst)
4462 {
4464  nullptr,
4465  _Get_section_size(_Src, _Src.extent));
4466 
4467  details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst)._Get();
4468 
4470 }
4471 
// copy (texture section -> output iterator): synchronous.
// NOTE(review): lines 4492 and 4498 dropped by extraction.
4490 template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst)
4491 {
4493  nullptr,
4494  _Get_section_size(_Src, _Copy_extent));
4495 
4496  details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst)._Get();
4497 
4499 }
4500 
// copy (whole texture -> texture): synchronous; extents must match exactly and
// the destination must be a writable texture type.
// NOTE(review): extraction dropped lines 4525-4526, 4529 and 4531. Line 4529
// presumably held the actual _Copy_async_impl(...)._Get() call — the visible
// text contains no copy statement at all — with 4525-4526/4531 presumably the
// ETW trace begin/end calls; confirm against the shipped amp_graphics.h.
4516 template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, _Dst_type &_Dst)
4517 {
4518  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4519 
4520  if (_Src.extent != _Dst.extent)
4521  {
4522  throw runtime_exception("The source and destination textures must have the exactly the same extent for whole-texture copy.", E_INVALIDARG);
4523  }
4524 
4527  _Get_section_size(_Dst, _Dst.extent));
4528 
4530 
4532 }
4533 
// copy (texture section -> texture section): synchronous; destination writable.
// NOTE(review): lines 4561-4562 and 4567 dropped by extraction.
4558 template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent)
4559 {
4560  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4563  _Get_section_size(_Src, _Copy_extent));
4564 
4565  details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent)._Get();
4566 
4568 }
4569 
// copy_async (iterator range -> whole texture): non-blocking; destination must
// be a writable texture type.
// NOTE(review): throughout this group the extraction dropped the ETW trace
// begin/end helper lines and the final line of each function, which presumably
// wraps _Ev into the returned concurrency::completion_future; confirm against
// the shipped amp_graphics.h. Dropped here: 4594-4595, 4600.
4591 template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst)
4592 {
4593  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4596  _Get_section_size(_Dst, _Dst.extent));
4597 
4598  _Event _Ev = details::_Copy_async_impl<InputIterator, _Dst_type::value_type, _Dst_type::rank>(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent);
4599 
4601 }
4602 
// copy_async (iterator range -> texture section).
// NOTE(review): lines 4634-4635 and 4640 dropped by extraction.
4630 template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst,
4631  const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
4632 {
4633  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4636  _Get_section_size(_Dst, _Copy_extent));
4637 
4638  _Event _Ev = details::_Copy_async_impl<InputIterator, _Dst_type::value_type, _Dst_type::rank>(_First, _Last, _Dst, _Dst_offset, _Copy_extent);
4639 
4641 }
4642 
// copy_async (whole texture -> output iterator).
// NOTE(review): lines 4660 and 4666 dropped by extraction; the orphaned
// "nullptr, _Get_section_size(...)" arguments belong to the dropped call.
4658 template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, OutputIterator _Dst)
4659 {
4661  nullptr,
4662  _Get_section_size(_Src, _Src.extent));
4663 
4664  _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst);
4665 
4667 }
4668 
// copy_async (texture section -> output iterator).
// NOTE(review): lines 4692 and 4698 dropped by extraction.
4690 template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst)
4691 {
4693  nullptr,
4694  _Get_section_size(_Src, _Copy_extent));
4695 
4696  _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst);
4697 
4699 }
4700 
// copy_async (whole texture -> texture): extents must match exactly.
// NOTE(review): extraction dropped lines 4727-4728, 4731 and 4733. Line 4731
// presumably held the _Event _Ev = ..._Copy_async_impl(...) call — no copy
// statement is visible in this function — and 4733 the completion_future
// return; confirm against the shipped amp_graphics.h.
4719 template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, _Dst_type &_Dst)
4720 {
4721  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4722 
4723  if (_Src.extent != _Dst.extent)
4724  {
4725  throw runtime_exception("The source and destination textures must have the exactly the same extent for whole-texture copy.", E_INVALIDARG);
4726  }
4729  _Get_section_size(_Dst, _Dst.extent));
4730 
4732 
4734 }
4735 
// copy_async (texture section -> texture section).
// NOTE(review): lines 4767-4768 and 4773 dropped by extraction.
4763 template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent)
4764 {
4765  static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
4766 
4769  _Get_section_size(_Src, _Copy_extent));
4770 
4771  _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent);
4772 
4774 }
4775 
4776 namespace details
4777 {
// Adopts an existing D3D texture into the AMP runtime: validates the inputs,
// wraps the IUnknown in a _Texture (returned through _Tex) and returns the
// extent derived from the adopted texture's width/height/depth.
// NOTE(review): extraction dropped line 4786 — given the throw that follows,
// presumably the "is this a D3D accelerator_view" check — and line 4798;
// confirm both against the shipped amp_graphics.h.
4778 template<int _Rank>
4779 Concurrency::extent<_Rank> _Make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, _Texture_base_type_id _Id, _Inout_ _Texture ** _Tex, DXGI_FORMAT _View_format) __CPU_ONLY
4780 {
// A null interface pointer cannot be adopted.
4781  if (_D3D_texture == NULL)
4782  {
4783  throw runtime_exception("NULL D3D texture pointer.", E_INVALIDARG);
4784  }
4785 
4787  throw runtime_exception("Cannot create D3D texture on a non-D3D accelerator_view.", E_INVALIDARG);
4788  }
4789 
4790  _Texture * _Tex_ptr = _Texture::_Adopt_texture(_Rank, _Id, _D3D_texture, _Av, _View_format);
// Staging textures must be mapped for CPU write access before use.
4791  if (_Tex_ptr->_Is_staging())
4792  {
4793  _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
4794  }
// Derive the rank-appropriate extent from the adopted texture's dimensions.
4795  Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Create_extent<_Rank>(_Tex_ptr->_Get_width(), _Tex_ptr->_Get_height(), _Tex_ptr->_Get_depth());
4796 
4797  _Is_valid_extent(_Ext);
4799 
4800  *_Tex = _Tex_ptr;
4801  return _Ext;
4802 }
4803 
4804 #pragma warning( pop )
4805 } // namespace details
4806 
4807 namespace direct3d
4808 {
// get_texture overloads: expose the D3D texture interface underlying an AMP
// texture / writeonly_texture_view / texture_view.
// NOTE(review): the extraction dropped each function body (lines 4826, 4848,
// 4869). Per the _D3D_interop::_Get_D3D_texture declaration elsewhere in this
// header, each body presumably forwards the underlying _Texture pointer to
// that helper and returns the resulting IUnknown*; confirm against the
// shipped amp_graphics.h.
4824  template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture<_Value_type, _Rank> &_Texture) __CPU_ONLY
4825  {
4827  }
4828 
// writeonly_texture_view is deprecated, so its overload is compiled with the
// deprecation warning suppressed.
4844 #pragma warning( push )
4845 #pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
4846  template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const writeonly_texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY
4847  {
4849  }
4850 #pragma warning( pop )
4851 
4867  template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY
4868  {
4870  }
4871 
// Creates an AMP texture that adopts an existing D3D texture on the given
// accelerator_view, viewing it with _View_format. The runtime validates the
// pointer and view inside details::_Make_texture and derives the extent from
// the adopted resource's dimensions.
template<typename _Value_type, int _Rank> texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture,
    DXGI_FORMAT _View_format /*= DXGI_FORMAT_UNKNOWN*/) __CPU_ONLY
{
    // double elements are represented through the uint base type when adopting;
    // every other element type maps to its own format base type id.
#pragma warning( suppress: 6326 ) // Potential comparison of a constant with another constant
    const auto _Base_type_id = (_Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type)
        ? _Uint_type
        : _Short_vector_type_traits<_Value_type>::_Format_base_type_id;

    _Texture * _Adopted_tex = NULL;
    Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Make_texture<_Rank>(_Av, _D3D_texture, _Base_type_id, &_Adopted_tex, _View_format);

    _ASSERTE(_Adopted_tex);
    return texture<_Value_type, _Rank>(_Ext, _Texture_descriptor(_Adopted_tex));
}
4908 
// Returns the D3D sampler-state interface corresponding to an AMP sampler on
// the given accelerator_view.
// NOTE(review): the extraction dropped the body (line 4923) — presumably a
// forward through the runtime's D3D interop layer; confirm against the
// shipped amp_graphics.h.
4921  inline _Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY
4922  {
4924  }
4925 
4935  inline sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY
4936  {
4937  return sampler(_Sampler_descriptor(_Sampler::_Create(_D3D_interop::_Get_D3D_sampler_data_ptr(_D3D_sampler))));
4938  }
4939 
4962  inline uint4 msad4(uint _Reference, uint2 _Source, uint4 _Accum) __GPU_ONLY
4963  {
4964  uint4 _Tmp;
4965  __dp_d3d_msad4(reinterpret_cast<uint*>(&_Tmp), _Reference, _Source.x, _Source.y, _Accum.x, _Accum.y, _Accum.z, _Accum.w);
4966  return _Tmp;
4967  }
4968 } // namespace direct3d
4969 
4970 } //namespace graphics
4971 } //namespace Concurrency
void copy(const _Src_type &_Src, _Out_ void *_Dst, unsigned int _Dst_byte_size)
Copies the contents of the source texture into the destination host buffer.
Definition: amp_graphics.h:4136
#define _Out_
Definition: sal.h:342
texture & operator=(const texture &_Other)
Copy assignment operator. Deep copy
Definition: amp_graphics.h:2143
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture initialized from a pair of iterators into a container, bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1061
uint_2::value_type _Scalar_type
Definition: amp_graphics.h:60
friend class accelerator
Definition: amprt.h:1444
texture_view< const _Value_type, _Rank > & operator=(const texture_view< const _Value_type, _Rank > &_Other) __GPU
Assignment operator. This read-only texture_view becomes a view of the same texture which _Other is a...
Definition: amp_graphics.h:3257
const _Value_type operator[](const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2238
_Sampler_descriptor _M_sampler_descriptor
Definition: amp_graphics.h:3141
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:1147
unsigned int uint
Definition: amp_short_vectors.h:498
const _Value_type value_type
Definition: amp_graphics.h:3159
Definition: amp_graphics.h:2916
unsigned int _Get_bits_per_element() const
Definition: amprt.h:2251
Represent a short vector of 2 unorm's.
Definition: amp_short_vectors.h:11343
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2369
sampler & operator=(const sampler &_Other) __GPU
Assignment operator.
Definition: amp_graphics.h:3038
unsigned int _Get_section_size(const _Texture_base< _Value_type, _Rank > &_Tex, const extent< _Rank > &_Extent)
Definition: amp_graphics.h:3720
#define NULL
Definition: vcruntime.h:236
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture initialized from a pair of iterators into a container, bound to a specific accele...
Definition: amp_graphics.h:1030
static const unsigned int _Default_bits_per_channel
Definition: amp_graphics.h:44
_Short_vector_base_type_id
Definition: amprt.h:291
texture(int _E0, int _E1, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from two integer extents, bound to a specific accelerator_view and a...
Definition: amp_graphics.h:849
uint4 msad4(uint _Reference, uint2 _Source, uint4 _Accum) __GPU_ONLY
Compares a 4-byte reference value and an 8-byte source value and accumulates a vector of 4 sums...
Definition: amp_graphics.h:4962
void _Get_preferred_copy_chunk_extent(unsigned int _Rank, size_t _Width, size_t _Height, size_t _Depth, size_t _Bits_per_element, _Out_writes_(3) size_t *_Preferred_copy_chunk_extent)
Definition: amprt.h:2619
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_texture(_In_ _Texture *_Texture_ptr)
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:725
double_4::value_type _Scalar_type
Definition: amp_graphics.h:282
void _Initialize(const Concurrency::accelerator_view &_Av, const void *_Source, unsigned int _Src_byte_size) __CPU_ONLY
Definition: amp_graphics.h:2547
Definition: amprt.h:2369
texture(const texture_view< _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view)
Construct a texture from a texture_view on another accelerator_view. Deep copy
Definition: amp_graphics.h:2018
sampler(sampler &&_Other) __GPU
Move constructor.
Definition: amp_graphics.h:3019
A texture is a data aggregate on an accelerator_view in the extent domain. It is a collection of vari...
Definition: amp_graphics.h:621
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element
Definition: amp_graphics.h:1335
_Ret_ IUnknown * get_texture(const texture< _Value_type, _Rank > &_Texture) __CPU_ONLY
Get the D3D texture interface underlying a texture.
Definition: amp_graphics.h:4824
__declspec(property(get=get_associated_accelerator_view)) Concurrency Concurrency::accelerator_view get_associated_accelerator_view() const __CPU_ONLY
Returns the accelerator_view that is the preferred target where this texture can be copied...
Definition: amp_graphics.h:2411
unsigned int _Get_texture_format() const
Definition: amprt.h:2230
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
extent< _Rank > _Get_extent_at_level(const extent< _Rank > &_Base_extent, unsigned int _Level)
Definition: xxamp_inl.h:141
sampler(const sampler &_Other) __GPU
Copy constructor.
Definition: amp_graphics.h:3005
texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and specified bits per scalar element, bound to a specific accelerator and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1487
Represent a short vector of 3 double's.
Definition: amp_short_vectors.h:18932
~texture_view() __GPU
Destructor
Definition: amp_graphics.h:3284
void _Copy_data_on_host_src_iter(int _Rank, _Input_iterator _Src, _Out_ _Value_type *_Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Dst_row_pitch_in_bytes, size_t _Dst_depth_pitch_in_bytes, size_t _Src_row_pitch, size_t _Src_depth_pitch)
Definition: amprt.h:2489
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and specified bits per scalar element...
Definition: amp_graphics.h:1605
Definition: amprt.h:295
Concurrency::extent< _Rank > _Create_extent(size_t _Width, size_t _Height, size_t _Depth)
Definition: amp_graphics.h:360
float value_type
Definition: amp_short_vectors.h:8219
Represent a unorm number. Each element is a floating point number in the range of [0...
Definition: amp_short_vectors.h:37
_AMPIMP accelerator_view _Get_accelerator_view() const
texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, bound to a specific...
Definition: amp_graphics.h:1459
__declspec(property(get=get_accelerator_view)) Concurrency Concurrency::accelerator_view get_accelerator_view() const __CPU_ONLY
Returns the accelerator_view where this texture or texture view is located.
Definition: amp_graphics.h:460
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, bound to a specific accelerat...
Definition: amp_graphics.h:1386
texture(const texture_view< const _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view)
Construct a texture from a read-only texture_view on another accelerator_view. Deep copy ...
Definition: amp_graphics.h:2033
Represent a short vector of 4 unsigned int's.
Definition: amp_short_vectors.h:1721
_Ret_ _Texture * _Get_texture(const _Texture_type &_Tex) __CPU_ONLY
Definition: xxamp.h:1100
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1836
Represent a short vector of 2 unsigned int's.
Definition: amp_short_vectors.h:522
float_2::value_type _Scalar_type
Definition: amp_graphics.h:141
bool _Get_chunked_staging_texture(_In_ _Texture *_Tex, const size_t *_Copy_chunk_extent, _Inout_ size_t *_Remaining_copy_extent, _Out_ size_t *_Curr_copy_extent, _Out_ _Texture_ptr *_Staging_texture)
Definition: amprt.h:2443
_Texture_base(const _Texture_base &_Src, bool _Flatten_mipmap_levels) __GPU_ONLY
Definition: amp_graphics.h:546
address_mode
address modes supported for texture sampling
Definition: amp_graphics.h:2912
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and specified bits per scalar element, bound to a specific accelerator and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1543
unorm_2::value_type _Scalar_type
Definition: amp_graphics.h:181
A sampler class aggregates sampling configuration information to be used for texture sampling...
Definition: amp_graphics.h:2924
void set(const index< _Rank > &_Index, const _Value_type &_Value) __GPU_ONLY
Set the element indexed by _Index with value _Value.
Definition: amp_graphics.h:2351
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, initialized from a host buffe...
Definition: amp_graphics.h:1742
__declspec(property(get=get_bits_per_scalar_element)) unsigned int bits_per_scalar_element
Returns the number of bits per scalar element
void _Copy_to(const _Texture_base &_Dest) const __CPU_ONLY
Definition: amp_graphics.h:558
#define __GPU
Definition: amprt.h:45
void _Initialize(const Concurrency::accelerator_view &_Av, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2536
norm_4::value_type _Scalar_type
Definition: amp_graphics.h:241
size_t _Get_row_pitch() const
Definition: amprt.h:2311
sampler(address_mode _Address_mode, float_4 _Border_color=float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
Constructs a sampler with default filter mode (filter_linear, same for min, mag, mip), but specified addressing mode (same for all dimensions) and border color.
Definition: amp_graphics.h:2970
std::array< size_t, 3 > _Get_dimensions(const Concurrency::extent< _Rank > &_Ext, unsigned int _Mip_offset)
Definition: amp_graphics.h:298
void _Copy_data_on_host_dst_iter(int _Rank, const _Value_type *_Src, _Output_iterator _Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Src_row_pitch_in_bytes, size_t _Src_depth_pitch_in_bytes, size_t _Dst_row_pitch, size_t _Dst_depth_pitch)
Definition: amprt.h:2553
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, bound to a specific...
Definition: amp_graphics.h:1512
_In_ int _Val
Definition: vcruntime_string.h:62
texture(const Concurrency::extent< _Rank > &_Ext, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:737
texture(const Concurrency::extent< _Rank > &_Ext, const _Texture_descriptor &_Descriptor)
Definition: amp_graphics.h:2418
uint_3::value_type _Scalar_type
Definition: amp_graphics.h:70
Class represents a virtual device abstraction on a C++ AMP data-parallel accelerator ...
Definition: amprt.h:1442
__declspec(property(get=get_row_pitch)) unsigned int row_pitch
Returns the row pitch (in bytes) of a 2D or 3D staging texture on the CPU to be used for navigating t...
static constexpr bool value
Definition: amp_graphics.h:611
const gather_return_type _Gather(const coordinates_type &_Coord, unsigned int _Component) const __GPU_ONLY
Definition: amp_graphics.h:3635
unsigned int get_mipmap_levels() const __GPU
Definition: amp_graphics.h:482
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_buffer(_In_ _Buffer *_Buffer_ptr)
Represent a short vector of 3 unorm's.
Definition: amp_short_vectors.h:11718
double_2::value_type _Scalar_type
Definition: amp_graphics.h:262
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents and specified bits per scalar element, initialized from a ho...
Definition: amp_graphics.h:1775
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:43
Class represents a future corresponding to a C++ AMP asynchronous operation
Definition: amprt.h:1266
Represent a short vector of 2 int's.
Definition: amp_short_vectors.h:4189
Definition: amprt.h:297
static void _Is_valid_extent(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1195
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
void _Initialize(const Concurrency::accelerator_view &_Av) __CPU_ONLY
Definition: amp_graphics.h:2505
unsigned int _Get_most_detailed_mipmap_level() const __GPU
Definition: amp_graphics.h:581
size_t _Get_depth(unsigned int _Mip_offset=0) const
Definition: amprt.h:2220
void _Are_valid_mipmap_parameters(unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
Definition: xxamp.h:1246
unsigned int _Get_default_bits_per_scalar_element()
Definition: amp_graphics.h:290
_Value_type value_type
Definition: amp_graphics.h:412
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, initialized from a host buffer.
Definition: amp_graphics.h:1714
unsigned int value_type
Definition: amp_short_vectors.h:1724
static const bool is_texture
Definition: amp_graphics.h:3658
texture(int _E0) __CPU_ONLY
Construct texture with the extent _E0
Definition: amp_graphics.h:680
Definition: amprt.h:2152
integral_constant< bool, false > false_type
Definition: xtr1common:41
texture(int _E0, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with the extent _E0, bound to a specific accelerator_view and an ass...
Definition: amp_graphics.h:801
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:1086
Definition: amp_graphics.h:607
Definition: amprt.h:294
unsigned int _Get_num_channels() const
Definition: amprt.h:2240
double value_type
Definition: amp_short_vectors.h:19666
_AMPIMP accelerator_view _Get_access_on_accelerator_view() const
const gather_return_type gather_alpha(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3536
short_vector provides metaprogramming definitions which are useful for programming short vectors gene...
Definition: amp_short_vectors.h:23839
Represent a short vector of 4 double's.
Definition: amp_short_vectors.h:19663
float_4 _M_border_color
Definition: amp_graphics.h:3144
const _Value_type sample(const sampler &_Sampler, const coordinates_type &_Coord, float _Level_of_detail=0.0f) const __GPU_ONLY
Sample the texture at the given coordinates and level of detail using the specified sampling configur...
Definition: amp_graphics.h:3423
_AMPIMP bool __cdecl _Is_D3D_accelerator_view(const accelerator_view &_Av)
unsigned int get_data_length() const __CPU_ONLY
Definition: amp_graphics.h:493
_AMPIMP void _Get()
Wait until the _Event completes and throw any exceptions that occur.
unsigned int
Definition: vccorlib.h:2468
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture initialized from a pair of iterators into a container.
Definition: amp_graphics.h:926
Represent a short vector of 3 int's.
Definition: amp_short_vectors.h:4597
short_vector< float, _Rank >::type coordinates_type
Definition: amp_graphics.h:3160
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace()
const _Sampler_descriptor & _Get_descriptor() const __GPU_ONLY
Definition: amp_graphics.h:3136
texture(int _E0, int _E1, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from two integer extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:822
__declspec(property(get=get_border_color)) Concurrency::graphics::float_4 get_border_color() const __GPU
Returns the sampler's border value
Definition: amp_graphics.h:3095
bool _Is_adopted() const
Definition: amprt.h:2081
enum _Short_vector_base_type_id _Texture_base_type_id
Definition: amprt.h:302
texture_view(const texture< _Value_type, _Rank > &_Src) __GPU_ONLY
Construct a read-only texture_view of a texture _Src on an accelerator.
Definition: amp_graphics.h:3169
Represent a norm number. Each element is a floating point number in the range of [-1.0f, 1.0f].
Definition: amp_short_vectors.h:208
texture_view< const _Value_type, _Rank > & operator=(const texture_view< _Value_type, _Rank > &_Other) __CPU_ONLY
Assignment operator from a writable texture_view. This read-only texture_view becomes a view of the s...
Definition: amp_graphics.h:3274
texture(int _E0, int _E1) __CPU_ONLY
Construct a texture from two integer extents.
Definition: amp_graphics.h:698
unsigned int _Get_most_detailed_mipmap_level() const __GPU
Definition: amprt.h:659
#define UINT_MAX
Definition: limits.h:36
extent() __GPU
Default constructor. The value at each dimension is initialized to zero.
Definition: amp.h:404
friend _Event _Copy_async_impl(const _Texture_base< _Value_type, _Rank > &_Src, const index< _Rank > &_Src_offset, const _Texture_base< _Value_type, _Rank > &_Dst, const index< _Rank > &_Dst_offset, const extent< _Rank > &_Copy_extent) __CPU_ONLY
Definition: amp_graphics.h:4054
int_4::value_type _Scalar_type
Definition: amp_graphics.h:120
const gather_return_type gather_alpha(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3614
texture(const texture_view< _Value_type, _Rank > &_Src)
Construct a texture from a texture_view. Deep copy
Definition: amp_graphics.h:1991
sampler(const _Sampler_descriptor &_Descriptor) __CPU_ONLY
Definition: amp_graphics.h:3105
void __dp_d3d_msad4(_Out_ unsigned int *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int) __GPU_ONLY
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_sampler(const Concurrency::accelerator_view &_Av, _In_ _Sampler *_Sampler_ptr)
static void _Is_valid_section(const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
Definition: xxamp.h:1106
texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from three integer extents, bound to a specific accelerator_view and...
Definition: amp_graphics.h:903
Definition: amprt.h:293
_Iter_diff_t< _InIt > distance(_InIt _First, _InIt _Last)
Definition: xutility:1124
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev
void copy_to(texture &_Dest) const
Copy-to, deep copy
Definition: amp_graphics.h:2182
const void * data() const __CPU_ONLY
Returns a CPU pointer to the raw data of this texture.
Definition: amp_graphics.h:2370
~_Texture_base() __GPU
Definition: amp_graphics.h:572
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents and specified bits per scalar element, bound to a specific a...
Definition: amp_graphics.h:1438
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, initialized from a ...
Definition: amp_graphics.h:1802
Concurrency::graphics::filter_mode _M_filter_mode
Definition: amp_graphics.h:3142
_Ret_ T * _Get_ptr() const
Definition: amprt.h:248
unsigned int get_row_pitch() const __CPU_ONLY
Definition: amp_graphics.h:2380
static const bool is_writable
Definition: amp_graphics.h:3659
_AMPIMP void _Map_buffer(_Access_mode _Map_type, bool _Wait)
void _Initialize() __CPU_ONLY
Definition: amp_graphics.h:3121
const _Value_type sample(const coordinates_type &_Coord, float _Level_of_detail=0.0f) const __GPU_ONLY
Sample the texture at the given coordinates and level of detail using the predefined sampling configu...
Definition: amp_graphics.h:3454
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element
Definition: amp_graphics.h:1273
texture(const texture_view< const _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view, const Concurrency::accelerator_view &_Associated_av)
Construct a staging texture from a read-only texture_view on another accelerator_view. Deep copy
Definition: amp_graphics.h:2071
std::array< size_t, 3 > _Get_indices(const index< _Rank > &_Idx)
Definition: amp_graphics.h:329
static const bool _Is_valid_SVT_for_texture
Definition: amp_graphics.h:41
Definition: amp_graphics.h:384
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, initialized from a h...
Definition: amp_graphics.h:1654
sampler() __CPU_ONLY
Constructs a sampler with default filter mode (filter_linear, same for min, mag, mip), addressing mode (address_clamp, same for all dimensions), and border color (float_4(0.0f, 0.0f, 0.0f, 0.0f)).
Definition: amp_graphics.h:2937
_Ret_ void * data() __CPU_ONLY
Returns a CPU pointer to the raw data of this texture.
Definition: amp_graphics.h:2362
static auto _Fn(_Uty _Val, decltype(*_Val,++_Val, 0)) -> std::true_type
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents, specified bits per scalar element and number of mipmap levels ...
Definition: amp_graphics.h:1411
Concurrency::extent< _Rank > get_mipmap_extent(unsigned int _Mipmap_level) const __CPU_ONLY
Returns the extent for specific mipmap level of this texture or texture view.
Definition: amp_graphics.h:431
unsigned int _Get_view_mipmap_levels() const __GPU
Definition: amprt.h:664
texture(int _E0, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element
Definition: amp_graphics.h:1313
#define _In_
Definition: sal.h:305
int_2::value_type _Scalar_type
Definition: amp_graphics.h:100
_Texture_base(const _Texture_base &_Src) __GPU
Definition: amp_graphics.h:532
const gather_return_type gather_red(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3481
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Definition: amp_graphics.h:2511
texture_view(const texture_view< const _Value_type, _Rank > &_Other) __GPU
Construct a read-only texture_view from another read-only texture_view. Both are views of the same te...
Definition: amp_graphics.h:3227
Definition: amp_graphics.h:2906
Represent a short vector of 4 unorm's.
Definition: amp_short_vectors.h:12488
const gather_return_type gather_blue(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3517
_Texture_base(const _Texture_base &_Src, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __CPU_ONLY
Definition: amp_graphics.h:538
void copy_to(const writeonly_texture_view< _Value_type, _Rank > &_Dest) const
Copy-to, deep copy
Definition: amp_graphics.h:2206
norm_2::value_type _Scalar_type
Definition: amp_graphics.h:221
struct Concurrency::details::_Sampler_descriptor _Sampler_descriptor
texture & operator=(texture< _Value_type, _Rank > &&_Other)
Move assignment operator
Definition: amp_graphics.h:2163
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, bound to a specific accelerator.
Definition: amp_graphics.h:1571
#define __CPU_ONLY
Definition: amprt.h:47
Definition: amp_graphics.h:623
IUnknown * _M_data_ptr
Definition: amprt.h:558
Represent a short vector of 2 double's.
Definition: amp_short_vectors.h:18590
_Short_vector_type_traits< _Value_type >::_Scalar_type scalar_type
Definition: amp_graphics.h:413
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt_exceptions.h:29
sampler(filter_mode _Filter_mode) __CPU_ONLY
Constructs a sampler with specified filter mode (same for min, mag, mip), but with default addressing...
Definition: amp_graphics.h:2952
_Ret_ _Sampler * _Get_sampler_ptr() const __CPU_ONLY
Definition: amp_graphics.h:3116
_Ret_ void * _Get_host_ptr() const
Definition: amprt.h:2019
_AMPIMP accelerator __cdecl _Select_default_accelerator()
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, initialized from a host buffe...
Definition: amp_graphics.h:1630
bool _Are_mipmap_levels_overlapping(const _Texture_descriptor *_Other) const __CPU_ONLY
Definition: amprt.h:699
void _Initialize(const Concurrency::accelerator_view &_Av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Definition: amp_graphics.h:2525
texture_view(const texture_view< const _Value_type, _Rank > &_Other, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
Construct a read-only texture_view from another read-only texture_view. Allows narrowing down the acc...
Definition: amp_graphics.h:3246
Represent a short vector of 3 float's.
Definition: amp_short_vectors.h:8216
extent< _Rank > _Get_extent_at_level_unsafe(const extent< _Rank > &_Base_extent, unsigned int _Level) __GPU
Definition: xxamp_inl.h:95
unsigned int get_depth_pitch() const __CPU_ONLY
Definition: amp_graphics.h:2396
Represent a short vector of 4 int's.
Definition: amp_short_vectors.h:5400
unsigned int _Get_data_length(unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels, const size_t *_Extents=nullptr) const
Definition: amprt.h:2256
Concurrency::extent< _Rank > _Make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, _Texture_base_type_id _Id, _Inout_ _Texture **_Tex, DXGI_FORMAT _View_format) __CPU_ONLY
Definition: amp_graphics.h:4779
Concurrency::extent< _Rank > get_mipmap_extent(unsigned int _Mipmap_level) const __GPU_ONLY
Returns the extent for specific mipmap level of this texture or texture view.
Definition: amp_graphics.h:451
const gather_return_type gather_blue(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3594
_Ret_ _Sampler * _Get_sampler_ptr() const __CPU_ONLY
Definition: amprt.h:780
const gather_return_type gather_red(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3556
A texture_view provides read-only access and sampling capability to a textu...
Definition: amp_graphics.h:3156
float value_type
Definition: amp_short_vectors.h:8950
void set(const index< _Rank > &_Index, const _Value_type &_Value) const __GPU_ONLY
Set the element indexed by _Index with value _Value.
Definition: amp_graphics.h:2893
unsigned int _Get_max_mipmap_levels(const extent< _Rank > &_Extent)
Definition: xxamp.h:1225
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
void * _M_data_ptr
Definition: amprt.h:720
texture(const texture &_Src)
Copy constructor. Deep copy
Definition: amp_graphics.h:2083
texture(int _E0, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with the extent _E0, bound to a specific accelerator_view.
Definition: amp_graphics.h:777
unsigned int _Get_rank() const
Definition: amprt.h:2225
size_t _Get_depth_pitch() const
Definition: amprt.h:2321
unsigned int value_type
Definition: amp_short_vectors.h:525
_AMPIMP ULONG _Launch_async_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
Definition: amp_graphics.h:2914
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and initialized from a pair of iterators into a containe...
Definition: amp_graphics.h:1214
void _Initialize(const Concurrency::accelerator_view &_Av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2495
unorm_3::value_type _Scalar_type
Definition: amp_graphics.h:191
texture(const texture &_Src, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av)
Copy constructor. Deep copy
Definition: amp_graphics.h:2128
_Event _Copy_async_impl(const void *_Src, unsigned int _Src_byte_size, const _Texture_base< _Value_type, _Rank > &_Dst, const index< _Rank > &_Offset, const Concurrency::extent< _Rank > &_Copy_extent)
Definition: amp_graphics.h:3954
Definition: amp_graphics.h:3656
short_vector< typename details::_Texture_base< _Value_type, _Rank >::scalar_type, 4 >::type gather_return_type
Definition: amp_graphics.h:3161
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view.
Definition: amp_graphics.h:1938
texture_view(const texture< _Value_type, _Rank > &_Src) __CPU_ONLY
Construct a texture_view of a texture _Src on the host.
Definition: amp_graphics.h:3182
texture_view(texture< _Value_type, _Rank > &_Src, unsigned int _Mipmap_level=0) __CPU_ONLY
Construct a texture_view of a texture _Src on host.
Definition: amp_graphics.h:2711
int_3::value_type _Scalar_type
Definition: amp_graphics.h:110
static const _Short_vector_base_type_id _Format_base_type_id
Definition: amp_graphics.h:42
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and specified bits per scalar element...
Definition: amp_graphics.h:1978
_AMPIMP ULONG _Start_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
size_t _Get_height(unsigned int _Mip_offset=0) const
Definition: amprt.h:2215
void _Set_view_mipmap_levels(unsigned int _View_mipmap_levels) __CPU_ONLY
Definition: amprt.h:669
__declspec(property(get=get_address_mode)) Concurrency::graphics::address_mode get_address_mode() const __GPU
Returns the sampler's address mode
Definition: amp_graphics.h:3086
Concurrency::graphics::address_mode _M_address_mode
Definition: amp_graphics.h:3143
void _Set_texture_ptr(_In_opt_ _Texture *_Texture_ptr) __CPU_ONLY
Definition: amprt.h:674
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2530
texture(texture &&_Other)
Move constructor
Definition: amp_graphics.h:2095
Definition: amp_graphics.h:2915
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:1089
Definition: amp_graphics.h:2905
void _Initialize(const Concurrency::accelerator_view &_Av, const details::_Texture_base< _Value_type, _Rank > &_Src) __CPU_ONLY
Definition: amp_graphics.h:2581
texture(const texture &_Src, const Concurrency::accelerator_view &_Av)
Copy constructor. Deep copy
Definition: amp_graphics.h:2109
Concurrency::details::_Texture_descriptor _Texture_descriptor
Definition: amp_graphics.h:500
double value_type
Definition: amp_short_vectors.h:18593
__declspec(property(get=get_filter_mode)) Concurrency::graphics::filter_mode get_filter_mode() const __GPU
Returns the sampler's filter mode
Definition: amp_graphics.h:3077
const _Value_type operator()(const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2811
void _Is_valid_data_length(unsigned int _Num_elems, unsigned int _Bits_per_elem)
Definition: amp_graphics.h:596
texture< _Value_type, _Rank > make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format=DXGI_FORMAT_UNKNOWN) __CPU_ONLY
Create a texture from a D3D texture interface pointer, optionally using the specified DXGI format fo...
Definition: amp_graphics.h:4896
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and initialized from a pair of iterators into a con...
Definition: amp_graphics.h:1118
Represent a short vector of 2 float's.
Definition: amp_short_vectors.h:7874
sampler(filter_mode _Filter_mode, address_mode _Address_mode, float_4 _Border_color=float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
Constructs a sampler with specified filter mode (same for min, mag, mip), addressing mode (same for a...
Definition: amp_graphics.h:2991
float_3::value_type _Scalar_type
Definition: amp_graphics.h:151
Represent a short vector of 3 norm's.
Definition: amp_short_vectors.h:15325
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and initialized from a pair of iterators into a ...
Definition: amp_graphics.h:1252
_Texture_descriptor _M_texture_descriptor
Definition: amp_graphics.h:593
const _Value_type operator()(const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2269
A texture_view provides read and write access to a texture. Note that currently texture_view can only...
Definition: amp_graphics.h:625
double_3::value_type _Scalar_type
Definition: amp_graphics.h:272
Represent a short vector of 4 float's.
Definition: amp_short_vectors.h:8947
_Ret_ _Texture * _Get_texture() const __CPU_ONLY
Definition: amp_graphics.h:576
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and initialized from a pair of iterators into a co...
Definition: amp_graphics.h:1182
double value_type
Definition: amp_short_vectors.h:18935
const gather_return_type gather_green(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3574
float_4::value_type _Scalar_type
Definition: amp_graphics.h:161
_Texture_base(const Concurrency::extent< _Rank > &_Ext, unsigned int _Mipmap_levels=1) __CPU_ONLY
Definition: amp_graphics.h:508
bool operator==(const _Texture_base &_Other) const __CPU_ONLY
Definition: amp_graphics.h:567
const gather_return_type _Gather(const sampler &_Sampler, const coordinates_type &_Coord, unsigned int _Component) const __GPU_ONLY
Definition: amp_graphics.h:3622
const _Value_type operator[](const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2776
integral_constant< bool, true > true_type
Definition: xtr1common:40
Definition: type_traits:931
constexpr remove_reference< _Ty >::type && move(_Ty &&_Arg) _NOEXCEPT
Definition: type_traits:1290
Define an N-dimensional index point; which may also be viewed as a vector based at the origin in N-sp...
Definition: amp.h:53
unsigned int value_type
Definition: amp_short_vectors.h:927
static const unsigned int _Num_channels
Definition: amp_graphics.h:43
texture_view(const texture_view< _Value_type, _Rank > &_Other) __CPU_ONLY
Construct a read-only texture_view of a writable texture_view.
Definition: amp_graphics.h:3216
#define _Inout_
Definition: sal.h:375
filter_mode
filter modes supported for texture sampling
Definition: amp_graphics.h:2902
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view.
Definition: amp_graphics.h:1867
bool _Are_mipmap_levels_overlapping(const _Texture_base &_Other) const __CPU_ONLY
Definition: amp_graphics.h:586
uint_4::value_type _Scalar_type
Definition: amp_graphics.h:80
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with three integers and initialized from a pair of iterators into a containe...
Definition: amp_graphics.h:1004
texture_view(const texture_view< _Value_type, _Rank > &_Other) __GPU
Construct a texture_view from another texture_view. Both are views of the same texture.
Definition: amp_graphics.h:2739
_Ret_ _Texture * _Get_texture_ptr() const __CPU_ONLY
Definition: amprt.h:653
Definition: amprt.h:318
texture(const Concurrency::extent< _Rank > &_Ext) __CPU_ONLY
Construct a texture from extents.
Definition: amp_graphics.h:666
Concurrency::extent< _Rank > _M_extent
Definition: amp_graphics.h:592
Concurrency::details::_Sampler_descriptor _Sampler_descriptor
Definition: amp_graphics.h:3102
Definition: type_traits:950
Definition: amprt.h:298
_Texture_base(const Concurrency::extent< _Rank > &_Ext, const _Texture_descriptor &_Desc) __CPU_ONLY
Definition: amp_graphics.h:552
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Definition: amp_graphics.h:2500
texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from three integer extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:873
basic_stringstream< char, char_traits< char >, allocator< char > > stringstream
Definition: iosfwd:688
texture(const Concurrency::extent< _Rank > &_Ext, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents, bound to a specific accelerator_view and an associated acce...
Definition: amp_graphics.h:760
float value_type
Definition: amp_short_vectors.h:7877
int value_type
Definition: amp_short_vectors.h:5403
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, initialized from a ...
Definition: amp_graphics.h:1682
unsigned int get_bits_per_scalar_element() const __CPU_ONLY
Definition: amp_graphics.h:469
~texture_view() __GPU
Destructor
Definition: amp_graphics.h:2763
sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY
Create a sampler from a D3D sampler state interface pointer.
Definition: amp_graphics.h:4935
Definition: amprt.h:92
Definition: amp_graphics.h:2917
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with two integers and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:974
void _Is_valid_mipmap_range(unsigned int _Src_view_mipmap_levels, unsigned int _Dst_most_detailed_level, unsigned int _Dst_view_mipmap_levels)
Definition: xxamp.h:1266
concurrency::completion_future copy_async(const _Src_type &_Src, _Out_ void *_Dst, unsigned int _Dst_byte_size)
Asynchronously copies the contents of the source texture into the destination host buffer...
Definition: amp_graphics.h:4269
void _Set_sampler_ptr(_In_opt_ _Sampler *_Sampler_ptr) __CPU_ONLY
Definition: amprt.h:785
texture(int _E0, int _E1, int _E2) __CPU_ONLY
Construct a texture from three integer extents.
Definition: amp_graphics.h:719
const gather_return_type gather_green(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3498
#define __GPU_ONLY
Definition: amprt.h:46
norm_3::value_type _Scalar_type
Definition: amp_graphics.h:231
size_t _Get_width(unsigned int _Mip_offset=0) const
Definition: amprt.h:2210
_Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY
Get the D3D sampler state interface on the given accelerator view that represents the specified sampl...
Definition: amp_graphics.h:4921
int value_type
Definition: amp_short_vectors.h:4600
bool _Is_staging() const
Definition: amprt.h:2051
Definition: amp_graphics.h:2918
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const details::_Texture_base< _Value_type, _Rank > &_Src) __CPU_ONLY
Definition: amp_graphics.h:2552
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2428
bool _Is_cpu_accelerator(const accelerator &_Accl)
Definition: amprt.h:3401
~texture() __CPU_ONLY
Destructor
Definition: amp_graphics.h:2225
struct Concurrency::details::_Texture_descriptor _Texture_descriptor
texture_view(const texture< _Value_type, _Rank > &_Src, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
Construct a read-only texture_view with specific range of mipmap levels of a texture _Src on the host...
Definition: amp_graphics.h:3202
bool _Should_create_staging_texture(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av)
Definition: amp_graphics.h:2423
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and specified bits per scalar element...
Definition: amp_graphics.h:1904
_In_ int _Value
Definition: setjmp.h:173
sampler & operator=(sampler &&_Other) __GPU
Move assignment operator.
Definition: amp_graphics.h:3059
Definition: amp_graphics.h:2904
texture_view(texture< _Value_type, _Rank > &_Src) __GPU_ONLY
Construct a texture_view of a texture _Src on an accelerator.
Definition: amp_graphics.h:2725
unsigned int _Get_bits_per_channel() const
Definition: amprt.h:2245
Definition: set:42
_Texture_base() __CPU_ONLY
Definition: amp_graphics.h:502
Represent a short vector of 3 unsigned int's.
Definition: amp_short_vectors.h:924
Definition: amprt.h:296
_FwdIt _Last
Definition: algorithm:1936
unorm_4::value_type _Scalar_type
Definition: amp_graphics.h:201
Represent a short vector of 2 norm's.
Definition: amp_short_vectors.h:14929
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const void *_Source, unsigned int _Src_byte_size) __CPU_ONLY
Definition: amp_graphics.h:2541
const _Texture_descriptor & _Get_texture_descriptor(const _Texture_type &_Tex) __GPU
Definition: xxamp.h:1094
texture(const texture_view< _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view, const Concurrency::accelerator_view &_Associated_av)
Construct a staging texture from a texture_view on another accelerator_view. Deep copy ...
Definition: amp_graphics.h:2052
_AMPIMP void _Write_end_event(ULONG _Span_id)
#define _Ret_
Definition: sal.h:996
int value_type
Definition: amp_short_vectors.h:4192
concurrency::completion_future _Start_async_op_wait_event_helper(ULONG _Async_op_id, _Event _Ev)
Definition: amprt.h:3753
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element ...
Definition: amp_graphics.h:1363
Represent a short vector of 4 norm's.
Definition: amp_short_vectors.h:16120
__declspec(property(get=get_extent)) Concurrency Concurrency::extent< _Rank > get_extent() const __GPU
Returns the extent that defines the shape of this texture or texture view.
Definition: amp_graphics.h:420
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with the extent _E0 and from a pair of iterators into a container.
Definition: amp_graphics.h:948
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels) __CPU_ONLY
Construct a texture from extents, specified bits per scalar element and number of mipmap levels ...
Definition: amp_graphics.h:1295
texture(const texture_view< const _Value_type, _Rank > &_Src)
Construct a texture from a read-only texture_view. Deep copy
Definition: amp_graphics.h:2003
class __declspec(deprecated("writeonly_texture_view is deprecated. Please use texture_view instead.")) writeonly_texture_view
A writeonly_texture_view provides writeonly access to a texture.
Definition: amp_graphics.h:2598
texture_view< _Value_type, _Rank > & operator=(const texture_view< _Value_type, _Rank > &_Other) __GPU
Assignment operator. This texture_view becomes a view of the same texture which _Other is a view of...
Definition: amp_graphics.h:2750