STLdoc
STLdocumentation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
amp_graphics.h
Go to the documentation of this file.
1 /***
2 * ==++==
3 *
4 * Copyright (c) Microsoft Corporation. All rights reserved.
5 *
6 * ==--==
7 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
8 *
9 * amp_graphics.h
10 *
11 * C++ AMP Graphics Library
12 *
13 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
14 ****/
15 
16 #pragma once
17 
18 #include <amp_short_vectors.h>
19 #include <array>
20 #include <dxgiformat.h>
21 #include <sstream>
22 
23 #define _AMP_GRAPHICS_H
24 
25 namespace Concurrency
26 {
27 
28 namespace graphics
29 {
30 
31 namespace details
32 {
33 
34 #pragma warning( push )
35 #pragma warning( disable : 6326 ) // Potential comparison of a constant with another constant
36 
// _Short_vector_type_traits: compile-time description of how a short-vector
// type (uint_N / int_N / float_N / unorm_N / norm_N / double_N) maps onto a
// texture element: its scalar type, whether it is a legal texture value_type,
// its channel count, and the default bits per channel.
//
// NOTE(review): the documentation extraction dropped every
// "struct _Short_vector_type_traits<...>" declaration line (and the
// _Format_base_type_id members), so only the bodies survive below. The
// specialization each body belongs to is inferred from the surviving
// typedefs and channel counts and marked as an assumption — verify against
// the shipped amp_graphics.h before relying on it.

// Primary template: any type not specialized below is not a valid
// short-vector type for textures (void scalar, 0 channels).
37 template<typename _Ty>
39 {
40  typedef void _Scalar_type;
41  static const bool _Is_valid_SVT_for_texture = false;
43  static const unsigned int _Num_channels = 0;
44  static const unsigned int _Default_bits_per_channel = 0;
45 };
46 
// Presumably the unsigned int family: unsigned int, uint_2, uint_3, uint_4
// (scalar typedef "unsigned int", 1..4 channels, 32 bits each) — TODO confirm.
47 template<>
49 {
50  typedef unsigned int _Scalar_type;
51  static const bool _Is_valid_SVT_for_texture = true;
53  static const unsigned int _Num_channels = 1;
54  static const unsigned int _Default_bits_per_channel = 32;
55 };
56 
57 template<>
59 {
61  static const bool _Is_valid_SVT_for_texture = true;
63  static const unsigned int _Num_channels = 2;
64  static const unsigned int _Default_bits_per_channel = 32;
65 };
66 
67 template<>
69 {
71  static const bool _Is_valid_SVT_for_texture = true;
73  static const unsigned int _Num_channels = 3;
74  static const unsigned int _Default_bits_per_channel = 32;
75 };
76 
77 template<>
79 {
81  static const bool _Is_valid_SVT_for_texture = true;
83  static const unsigned int _Num_channels = 4;
84  static const unsigned int _Default_bits_per_channel = 32;
85 };
86 
// Presumably the signed int family: int, int_2, int_3, int_4 — TODO confirm.
87 template<>
89 {
90  typedef int _Scalar_type;
91  static const bool _Is_valid_SVT_for_texture = true;
93  static const unsigned int _Num_channels = 1;
94  static const unsigned int _Default_bits_per_channel = 32;
95 };
96 
97 template<>
99 {
101  static const bool _Is_valid_SVT_for_texture = true;
103  static const unsigned int _Num_channels = 2;
104  static const unsigned int _Default_bits_per_channel = 32;
105 };
106 
107 template<>
109 {
111  static const bool _Is_valid_SVT_for_texture = true;
113  static const unsigned int _Num_channels = 3;
114  static const unsigned int _Default_bits_per_channel = 32;
115 };
116 
117 template<>
119 {
121  static const bool _Is_valid_SVT_for_texture = true;
123  static const unsigned int _Num_channels = 4;
124  static const unsigned int _Default_bits_per_channel = 32;
125 };
126 
127 
// Presumably the float family: float, float_2, float_3, float_4 — TODO confirm.
128 template<>
130 {
131  typedef float _Scalar_type;
132  static const bool _Is_valid_SVT_for_texture = true;
134  static const unsigned int _Num_channels = 1;
135  static const unsigned int _Default_bits_per_channel = 32;
136 };
137 
138 template<>
140 {
142  static const bool _Is_valid_SVT_for_texture = true;
144  static const unsigned int _Num_channels = 2;
145  static const unsigned int _Default_bits_per_channel = 32;
146 };
147 
148 template<>
150 {
152  static const bool _Is_valid_SVT_for_texture = true;
154  static const unsigned int _Num_channels = 3;
155  static const unsigned int _Default_bits_per_channel = 32;
156 };
157 
158 template<>
160 {
162  static const bool _Is_valid_SVT_for_texture = true;
164  static const unsigned int _Num_channels = 4;
165  static const unsigned int _Default_bits_per_channel = 32;
166 };
167 
// Presumably a 16-bit-per-channel normalized family (unorm, unorm_2,
// unorm_3, unorm_4); note the 3-channel variant is marked invalid with 0
// channels — TODO confirm which member of the family each body is.
168 template<>
170 {
172  static const bool _Is_valid_SVT_for_texture = true;
174  static const unsigned int _Num_channels = 1;
175  static const unsigned int _Default_bits_per_channel = 16;
176 };
177 
178 template<>
180 {
182  static const bool _Is_valid_SVT_for_texture = true;
184  static const unsigned int _Num_channels = 2;
185  static const unsigned int _Default_bits_per_channel = 16;
186 };
187 
188 template<>
190 {
192  static const bool _Is_valid_SVT_for_texture = false;
194  static const unsigned int _Num_channels = 0;
195  static const unsigned int _Default_bits_per_channel = 0;
196 };
197 
198 template<>
200 {
202  static const bool _Is_valid_SVT_for_texture = true;
204  static const unsigned int _Num_channels = 4;
205  static const unsigned int _Default_bits_per_channel = 16;
206 };
207 
// Presumably the second 16-bit normalized family (norm, norm_2, norm_3,
// norm_4), with the 3-channel variant again invalid — TODO confirm.
208 template<>
210 {
212  static const bool _Is_valid_SVT_for_texture = true;
214  static const unsigned int _Num_channels = 1;
215  static const unsigned int _Default_bits_per_channel = 16;
216 };
217 
218 template<>
220 {
222  static const bool _Is_valid_SVT_for_texture = true;
224  static const unsigned int _Num_channels = 2;
225  static const unsigned int _Default_bits_per_channel = 16;
226 };
227 
228 template<>
230 {
232  static const bool _Is_valid_SVT_for_texture = false;
234  static const unsigned int _Num_channels = 0;
235  static const unsigned int _Default_bits_per_channel = 0;
236 };
237 
238 template<>
240 {
242  static const bool _Is_valid_SVT_for_texture = true;
244  static const unsigned int _Num_channels = 4;
245  static const unsigned int _Default_bits_per_channel = 16;
246 };
247 
248 
// Presumably the double family: a scalar double occupies 2 x 32-bit channels,
// double_2 occupies 4; the remaining two bodies (0 channels, invalid) would
// be double_3 and double_4 — TODO confirm.
249 template<>
251 {
252  typedef double _Scalar_type;
253  static const bool _Is_valid_SVT_for_texture = true;
255  static const unsigned int _Num_channels = 2;
256  static const unsigned int _Default_bits_per_channel = 32;
257 };
258 
259 template<>
261 {
263  static const bool _Is_valid_SVT_for_texture = true;
265  static const unsigned int _Num_channels = 4;
266  static const unsigned int _Default_bits_per_channel = 32;
267 };
268 
269 template<>
271 {
273  static const bool _Is_valid_SVT_for_texture = false;
275  static const unsigned int _Num_channels = 0;
276  static const unsigned int _Default_bits_per_channel = 0;
277 };
278 
279 template<>
281 {
283  static const bool _Is_valid_SVT_for_texture = false;
285  static const unsigned int _Num_channels = 0;
286  static const unsigned int _Default_bits_per_channel = 0;
287 };
288 
// NOTE(review): helper function template whose signature and body lines were
// lost in extraction (only the template header and braces survive); likely a
// per-type query built on the traits above — restore from the original header.
289 template<typename _Short_vector_type>
291 {
295 }
296 
297 template<int _Rank>
298 std::array<size_t, 3> _Get_dimensions(const Concurrency::extent<_Rank> & _Ext, unsigned int _Mip_offset)
299 {
300  std::array<size_t, 3> _Arr;
301  // For un-used dimensions, use value 1.
302  switch((_Rank)) {
303  case 1:
304  _Arr[0] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
305  _Arr[1] = 1;
306  _Arr[2] = 1;
307  break;
308  case 2:
309  _Arr[0] = static_cast<size_t>((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U);
310  _Arr[1] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
311  _Arr[2] = 1;
312  break;
313  case 3:
314  _Arr[0] = static_cast<size_t>((_Ext[2] >> _Mip_offset) ? (_Ext[2] >> _Mip_offset) : 1U);
315  _Arr[1] = static_cast<size_t>((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U);
316  _Arr[2] = static_cast<size_t>((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U);
317  break;
318  default:
319  _ASSERTE(false);
320  _Arr[0] = 1;
321  _Arr[1] = 1;
322  _Arr[2] = 1;
323  break;
324  }
325  return _Arr;
326 }
327 
328 template <int _Rank>
329 std::array<size_t, 3> _Get_indices(const index<_Rank> &_Idx)
330 {
331  std::array<size_t, 3> _Arr;
332  // For un-used dimensions, use value 0.
333  switch((_Rank)) {
334  case 1:
335  _Arr[0] = static_cast<size_t>(_Idx[0]);
336  _Arr[1] = 0;
337  _Arr[2] = 0;
338  break;
339  case 2:
340  _Arr[0] = static_cast<size_t>(_Idx[1]);
341  _Arr[1] = static_cast<size_t>(_Idx[0]);
342  _Arr[2] = 0;
343  break;
344  case 3:
345  _Arr[0] = static_cast<size_t>(_Idx[2]);
346  _Arr[1] = static_cast<size_t>(_Idx[1]);
347  _Arr[2] = static_cast<size_t>(_Idx[0]);
348  break;
349  default:
350  _ASSERTE(false);
351  _Arr[0] = 0;
352  _Arr[1] = 0;
353  _Arr[2] = 0;
354  break;
355  }
356  return _Arr;
357 }
358 
359 template<int _Rank>
360 Concurrency::extent<_Rank> _Create_extent(size_t _Width, size_t _Height, size_t _Depth)
361 {
363  switch((_Rank)) {
364  case 1:
365  _Ext[0] = static_cast<int>(_Width);
366  break;
367  case 2:
368  _Ext[0] = static_cast<int>(_Height);
369  _Ext[1] = static_cast<int>(_Width);
370  break;
371  case 3:
372  _Ext[0] = static_cast<int>(_Depth);
373  _Ext[1] = static_cast<int>(_Height);
374  _Ext[2] = static_cast<int>(_Width);
375  break;
376  default:
377  _ASSERTE(false);
378  break;
379  }
380  return _Ext;
381 }
382 
383 // forward declaration
384 template <typename _Value_type, int _Rank> class _Texture_base;
385 template <typename _Value_type, int _Rank>
386 _Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Offset, const Concurrency::extent<_Rank> &_Copy_extent);
387 template <typename OutputIterator, typename _Value_type, int _Rank>
388 _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, OutputIterator _Dest_iter);
389 
390 template<typename _Value_type, int _Rank>
392  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent);
393 
394 // The base class for texture, writeonly_texture_view
// _Texture_base: common implementation base for texture and
// writeonly_texture_view. Holds the extent and a _Texture_descriptor and
// exposes extent / mipmap / accelerator_view / bits-per-element accessors.
//
// NOTE(review): this class body is incomplete in this extraction — several
// declaration lines and the return statements of some getters were dropped
// (marked inline below). The surviving code is left byte-identical.
395 template <typename _Value_type, int _Rank>
396 class _Texture_base
397 {
// Only ranks 1..3 and short-vector value types are legal texture types.
398  static_assert(_Rank > 0 && _Rank <= 3, "texture is only supported for rank 1, 2, and 3.");
399  static_assert(_Short_vector_type_traits<typename std::remove_const<_Value_type>::type>::_Is_valid_SVT_for_texture, "invalid value_type for a texture.");
400 
401  // Friends
// NOTE(review): the friend declarations' targets (doc lines 403/405/407)
// were lost in extraction; only the template headers and the tail of a
// _Copy_async_impl-style signature survive.
402  template<typename _T>
404  template<typename _T>
406  template<typename _Value_type, int _Rank>
408  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) __CPU_ONLY;
409 
410 public:
411  static const int rank = _Rank;
412  typedef typename _Value_type value_type;
// NOTE(review): doc line 413 (presumably a scalar_type typedef) is missing.
414 
415 public:
// Property: the extent (dimensions) of the most-detailed mipmap level.
419  __declspec(property(get=get_extent)) Concurrency::extent<_Rank> extent;
420  Concurrency::extent<_Rank> get_extent() const __GPU
421  {
422  return _M_extent;
423  }
424 
// Returns the extent at a given mipmap level; throws runtime_exception
// (E_INVALIDARG) when the level is out of range.
431  Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __CPU_ONLY
432  {
433  if (_Mipmap_level >= this->get_mipmap_levels())
434  {
435  std::stringstream _Err_msg;
436  _Err_msg << "Value for _Mipmap_level parameter (" << _Mipmap_level
437  << ") cannot be greater than or equal to number of mipmap levels ("
438  << this->get_mipmap_levels() << ") on the texture or texture view";
439 
440  throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG);
441  }
// NOTE(review): the return statement (doc line 442) is missing here.
443  }
444 
// GPU-context overload of get_mipmap_extent.
// NOTE(review): body (doc line 453) missing in this extraction.
451  Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __GPU_ONLY
452  {
454  }
455 
// Accessor for the accelerator_view the texture lives on.
// NOTE(review): body (doc line 462) missing in this extraction.
460  Concurrency::accelerator_view get_accelerator_view() const __CPU_ONLY
461  {
463  }
464 
// Property: bits per scalar element. For double-based types the underlying
// storage is counted in 32-bit channels, so the per-channel figure is scaled
// by sizeof(double)/sizeof(int).
468  __declspec(property(get=get_bits_per_scalar_element)) unsigned int bits_per_scalar_element;
469  unsigned int get_bits_per_scalar_element() const __CPU_ONLY
470  {
471  unsigned int _Bits_per_channel = _Get_texture()->_Get_bits_per_channel();
472  return _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Bits_per_channel * (sizeof(double)/sizeof(int)) : _Bits_per_channel;
473  }
474 
// Property: number of mipmap levels visible through this view.
// NOTE(review): body (doc line 484) missing in this extraction.
481  __declspec(property(get=get_mipmap_levels)) unsigned int mipmap_levels;
482  unsigned int get_mipmap_levels() const __GPU
483  {
485  }
486 
// Property: total data length in bytes.
// NOTE(review): body (doc line 495) missing in this extraction.
492  __declspec(property(get=get_data_length)) unsigned int data_length;
493  unsigned int get_data_length() const __CPU_ONLY
494  {
496  }
497 
498 protected:
499  // internal storage abstraction
// NOTE(review): doc line 500 (presumably a typedef for the storage type)
// is missing here.
501 
502  _Texture_base() __CPU_ONLY
503  {
504  // This default ctor is required to enable move ctor for a derived types,
505  // empty _Texture_base is later initialized by move assigment operator
506  }
507 
// Main constructor: validates the requested mipmap chain against the extent;
// _Mipmap_levels == 0 means "generate the full chain".
508  _Texture_base(const Concurrency::extent<_Rank>& _Ext, unsigned int _Mipmap_levels = 1) __CPU_ONLY
509  : _M_extent(_Ext), _M_texture_descriptor(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels)
510  {
// NOTE(review): doc line 511 (presumably a qualification of the call below)
// is missing; the statement may be truncated.
512  _Are_valid_mipmap_parameters(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels);
513 
514  // Validate if we can generate _Mipmap_levels number of mipmap levels given the dimensionality of the texture
515  unsigned int _Max_mipmap_levels = _Get_max_mipmap_levels(_M_extent);
516  if (_Mipmap_levels > _Max_mipmap_levels)
517  {
518  std::stringstream _Err_msg;
519  _Err_msg << "The texture extent is too small to generate (" << _Mipmap_levels << ") mipmap levels, the maximum allowed is (" << _Max_mipmap_levels << ")";
520  throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG);
521  }
522  else if (_Mipmap_levels == 0)
523  {
524  // Generate full range of all mipmaps
525  // e.g. 2D 10x2 texture would have: 10x2, 5x1, 2x1, 1x1 (4 mipmap levels)
526  _Mipmap_levels = _Max_mipmap_levels;
527  }
528  _M_texture_descriptor._Set_view_mipmap_levels(_Mipmap_levels);
529  }
530 
531  // shallow copy for texture_views
// NOTE(review): the constructor signature line (doc line 532) is missing;
// only its initializer list and empty body survive.
533  : _M_extent(_Src._M_extent), _M_texture_descriptor(_Src._M_texture_descriptor)
534  {
535  }
536 
537  // shallow copy for texture_views that redefine range of mipmaps
538  _Texture_base(const _Texture_base & _Src, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __CPU_ONLY
539  : _M_extent(_Get_extent_at_level(_Src.extent, _Most_detailed_mipmap_level)), _M_texture_descriptor(_Src._M_texture_descriptor, _Src._Get_most_detailed_mipmap_level() + _Most_detailed_mipmap_level, _View_mipmap_levels)
540  {
541  Concurrency::details::_Is_valid_mipmap_range(_Src.get_mipmap_levels(), _Most_detailed_mipmap_level, _View_mipmap_levels);
542  }
543 
544  // shallow copy for texture_views that in restrict(amp) context, the texture views can no longer redefine mipmap range,
545  // but read-write texture view needs to flatten to single mipmap level when created over a texture with multiple mipmap levels.
546  _Texture_base(const _Texture_base & _Src, bool _Flatten_mipmap_levels) __GPU_ONLY
547  : _M_extent(_Src.extent), _M_texture_descriptor(_Src._M_texture_descriptor, /*_Most_detailed_mipmap_level=*/0, _Flatten_mipmap_levels ? /*_View_mipmap_levels=*/1 : _Src.get_mipmap_levels())
548  {
549  }
550 
551  // interop
// NOTE(review): the initializer list (doc line 553) and body (doc line 555)
// of this interop constructor are missing in this extraction.
552  _Texture_base(const Concurrency::extent<_Rank>& _Ext, const _Texture_descriptor & _Desc) __CPU_ONLY
554  {
556  }
557 
// Synchronous deep copy into another _Texture_base; no-op when copying to
// itself (descriptor and extent both equal).
558  void _Copy_to(const _Texture_base & _Dest) const __CPU_ONLY
559  {
560  if (!(*this == _Dest))
561  {
562  _ASSERTE(this->extent == _Dest.extent);
563  details::_Copy_async_impl(*this, index<_Rank>(), _Dest, index<_Rank>(), _Dest.extent)._Get();
564  }
565  }
566 
// Two _Texture_base objects are equal when extent and descriptor both match.
567  bool operator==(const _Texture_base & _Other) const __CPU_ONLY
568  {
569  return _Other._M_extent == _M_extent && _Other._M_texture_descriptor == _M_texture_descriptor;
570  }
571 
// NOTE(review): the declaration line (doc line 572; presumably the
// destructor) is missing; only an empty body survives.
573  {
574  }
575 
// NOTE(review): body (doc line 578) missing in this extraction.
576  _Ret_ _Texture* _Get_texture() const __CPU_ONLY
577  {
579  }
580 
// NOTE(review): body (doc line 583) missing in this extraction.
581  unsigned int _Get_most_detailed_mipmap_level() const __GPU
582  {
584  }
585 
// Delegates overlap testing to the texture descriptor.
586  bool _Are_mipmap_levels_overlapping(const _Texture_base &_Other) const __CPU_ONLY
587  {
588  return _M_texture_descriptor._Are_mipmap_levels_overlapping(&_Other._M_texture_descriptor);
589  }
590 
591 protected:
// NOTE(review): doc line 592 (presumably "Concurrency::extent<_Rank>
// _M_extent;") is missing here; _M_extent is referenced throughout.
593  _Texture_descriptor _M_texture_descriptor;
594 };
595 
596 inline void _Is_valid_data_length(unsigned int _Num_elems, unsigned int _Bits_per_elem)
597 {
598  unsigned long long _Bytes_per_elem = static_cast<unsigned long long>(_Bits_per_elem / 8U);
599  unsigned long long _Total_bytes = static_cast<unsigned long long>(_Num_elems) * _Bytes_per_elem;
600  if (_Total_bytes > static_cast<unsigned long long>(UINT_MAX))
601  {
602  throw runtime_exception("Invalid - texture data_length exceeds UINT_MAX", E_INVALIDARG);
603  }
604 }
605 
606 } // namespace details
607 
608 
610 
611 // forward declarations
612 template <typename _Value_type, int _Rank>
613 class texture;
614 template <typename _Value_type, int _Rank>
616 template <typename _Value_type, int _Rank>
618 class sampler;
619 
620 namespace direct3d
621 {
622 template<typename _Value_type, int _Rank>
623 texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format = DXGI_FORMAT_UNKNOWN) __CPU_ONLY;
624 
625 sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY;
627 
628 } // namespace direct3d
629 
643 template <typename _Value_type, int _Rank> class texture : public details::_Texture_base<_Value_type, _Rank>
644 {
645  template<typename _Value_type, int _Rank>
646  friend texture<_Value_type,_Rank> direct3d::make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format) __CPU_ONLY;
647 
648  static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for texture.");
649 
650 public:
651 
660  {
661  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
662  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
664  }
665 
672  texture(int _E0) __CPU_ONLY
674  {
675  static_assert(_Rank == 1, "texture(int) is only permissible on texture<value_type, 1>.");
676  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
677  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
679  }
680 
690  texture(int _E0, int _E1) __CPU_ONLY
692  {
693  static_assert(_Rank == 2, "texture(int, int) is only permissible on texture<value_type, 2>.");
694  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
695  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
697  }
698 
711  texture(int _E0, int _E1, int _E2) __CPU_ONLY
712  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
713  {
714  static_assert(_Rank == 3, "texture(int, int, int) is only permissible on texture<value_type, 3>.");
715  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
716  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
718  }
719 
731  {
732  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
733  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
734  _Initialize(_Av);
735  }
736 
754  {
755  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
756  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
757  _Initialize(_Av, _Associated_av);
758  }
759 
769  texture(int _E0, const Concurrency::accelerator_view& _Av) __CPU_ONLY
771  {
772  static_assert(_Rank == 1, "texture(int, accelerator_view) is only permissible on texture<value_type, 1>.");
773  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
774  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
775  _Initialize(_Av);
776  }
777 
793  texture(int _E0, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
795  {
796  static_assert(_Rank == 1, "texture(int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
797  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
798  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
799  _Initialize(_Av, _Associated_av);
800  }
801 
814  texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av) __CPU_ONLY
816  {
817  static_assert(_Rank == 2, "texture(int, int, accelerator_view) is only permissible on texture<value_type, 2>.");
818  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
819  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
820  _Initialize(_Av);
821  }
822 
841  texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
843  {
844  static_assert(_Rank == 2, "texture(int, int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
845  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
846  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
847  _Initialize(_Av, _Associated_av);
848  }
849 
865  texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av) __CPU_ONLY
866  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
867  {
868  static_assert(_Rank == 3, "texture(int, int, int, accelerator_view) is only permissible on texture<value_type, 3>.");
869  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
870  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
871  _Initialize(_Av);
872  }
873 
895  texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
896  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
897  {
898  static_assert(_Rank == 3, "texture(int, int, int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
899  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
900  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
901  _Initialize(_Av, _Associated_av);
902  }
903 
916  template<typename _Input_iterator> texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
918  {
919  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
920  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
921  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
922  }
923 
936  template<typename _Input_iterator> texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
938  {
939  static_assert(_Rank == 1, "texture(int, iterator, iterator) is only permissible on texture<value_type, 1>.");
940  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
941  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
942  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
943  }
944 
960  template<typename _Input_iterator> texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
962  {
963  static_assert(_Rank == 2, "texture(int, int, iterator, iterator) is only permissible on texture<value_type, 2>.");
964  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
965  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
966  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
967  }
968 
969 
988  template<typename _Input_iterator> texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
989  : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
990  {
991  static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator) is only permissible on texture<value_type, 3>.");
992  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
993  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
994  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last);
995  }
996 
1012  template<typename _Input_iterator> texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1013  : _Texture_base(_Ext)
1014  {
1015  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1016  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1017  _Initialize(_Av, _Src_first, _Src_last);
1018  }
1019 
1041  template<typename _Input_iterator> texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1042  : _Texture_base(_Ext)
1043  {
1044  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1045  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1046  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1047  }
1048 
1064  template<typename _Input_iterator> texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1066  {
1067  static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 1>.");
1068  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1069  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1070  _Initialize(_Av, _Src_first, _Src_last);
1071  }
1072 
1094  template<typename _Input_iterator> texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1096  {
1097  static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1098  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1099  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1100  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1101  }
1102 
1121  template<typename _Input_iterator> texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1123  {
1124  static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 2>.");
1125  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1126  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1127  _Initialize(_Av, _Src_first, _Src_last);
1128  }
1129 
1154  template<typename _Input_iterator> texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1156  {
1157  static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1158  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
1159  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
1160  _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
1161  }
1162 
/// <summary>
///     Construct a rank-3 texture of extent (_E0, _E1, _E2) on accelerator view
///     _Av, initialized from the iterator range [_Src_first, _Src_last).
/// </summary>
template<typename _Input_iterator> texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view) is only permissible on texture<value_type, 3>.");
    // norm/unorm short-vector value types are rejected at compile time for
    // iterator-based construction.
    static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
    static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
    _Initialize(_Av, _Src_first, _Src_last);
}
1192 
/// <summary>
///     Construct a rank-3 texture of extent (_E0, _E1, _E2) on accelerator view
///     _Av with an associated accelerator view _Associated_av, initialized from
///     the iterator range [_Src_first, _Src_last).
/// </summary>
template<typename _Input_iterator> texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
    // norm/unorm short-vector value types are rejected at compile time for
    // iterator-based construction.
    static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor.");
    static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor.");
    _Initialize(_Av, _Associated_av, _Src_first, _Src_last);
}
1228 
/// <summary>
///     Construct an uninitialized texture of extent _Ext with the given number
///     of bits per scalar element, on the default accelerator's default view.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
}
1246 
/// <summary>
///     Construct an uninitialized texture of extent _Ext with the given bits
///     per scalar element and _Mipmap_levels mipmap levels, on the default
///     accelerator's default view.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels) __CPU_ONLY
    : _Texture_base(_Ext, _Mipmap_levels)
{
    _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
}
1268 
1281  texture(int _E0, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1283  {
1284  static_assert(_Rank == 1, "texture(int, unsigned int) is only permissible on texture<value_type, 1>.");
1285  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1286  }
1287 
1303  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1305  {
1306  static_assert(_Rank == 2, "texture(int, int, unsigned int) is only permissible on texture<value_type, 2>.");
1307  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
1308  }
1309 
/// <summary>
///     Construct an uninitialized rank-3 texture of extent (_E0, _E1, _E2)
///     with the given bits per scalar element, on the default accelerator's
///     default view.
/// </summary>
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, unsigned int) is only permissible on texture<value_type, 3>.");
    _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element);
}
1337 
1338 
/// <summary>
///     Construct an uninitialized texture of extent _Ext with the given bits
///     per scalar element, on accelerator view _Av.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(_Av, _Bits_per_scalar_element);
}
1359 
/// <summary>
///     Construct an uninitialized texture of extent _Ext with the given bits
///     per scalar element and _Mipmap_levels mipmap levels, on accelerator
///     view _Av.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(_Ext, _Mipmap_levels)
{
    _Initialize(_Av, _Bits_per_scalar_element);
}
1384 
/// <summary>
///     Construct an uninitialized texture of extent _Ext with the given bits
///     per scalar element, on accelerator view _Av with an associated
///     accelerator view _Associated_av (enables staging-texture creation).
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
}
1411 
1427  texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1429  {
1430  static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view) is only permissible on texture<value_type, 1>.");
1431  _Initialize(_Av, _Bits_per_scalar_element);
1432  }
1433 
1455  texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1457  {
1458  static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1459  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1460  }
1461 
1480  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1482  {
1483  static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view) is only permissible on texture<value_type, 2>.");
1484  _Initialize(_Av, _Bits_per_scalar_element);
1485  }
1486 
1511  texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1513  {
1514  static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1515  _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
1516  }
1517 
/// <summary>
///     Construct an uninitialized rank-3 texture of extent (_E0, _E1, _E2)
///     with the given bits per scalar element, on accelerator view _Av.
/// </summary>
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view) is only permissible on texture<value_type, 3>.");
    _Initialize(_Av, _Bits_per_scalar_element);
}
1545 
/// <summary>
///     Construct an uninitialized rank-3 texture of extent (_E0, _E1, _E2)
///     with the given bits per scalar element, on accelerator view _Av with an
///     associated accelerator view _Associated_av.
/// </summary>
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
    _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
}
1579 
/// <summary>
///     Construct a texture of extent _Ext with the given bits per scalar
///     element, on the default accelerator's default view, copying
///     _Src_byte_size bytes of initial data from _Source.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1603 
1622  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1624  {
1625  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 1>.");
1626  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1627  }
1628 
1650  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
1652  {
1653  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 2>.");
1654  _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
1655  }
1656 
1657 
/// <summary>
///     Construct a rank-3 texture of extent (_E0, _E1, _E2) with the given
///     bits per scalar element, on the default accelerator's default view,
///     copying _Src_byte_size bytes of initial data from _Source.
/// </summary>
texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int) is only permissible on texture<value_type, 3>.");
    _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1688 
/// <summary>
///     Construct a texture of extent _Ext with the given bits per scalar
///     element, on accelerator view _Av, copying _Src_byte_size bytes of
///     initial data from _Source.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1715 
/// <summary>
///     Construct a texture of extent _Ext with the given bits per scalar
///     element, on accelerator view _Av with an associated accelerator view
///     _Associated_av, copying _Src_byte_size bytes of initial data from
///     _Source.
/// </summary>
texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
    : _Texture_base(_Ext)
{
    _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1748 
1770  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1772  {
1773  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 1>.");
1774  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1775  }
1776 
1804  texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1806  {
1807  static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 1>.");
1808  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1809  }
1810 
1835  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
1837  {
1838  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 2>.");
1839  _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1840  }
1841 
1872  texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
1874  {
1875  static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 2>.");
1876  _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
1877  }
1878 
/// <summary>
///     Construct a rank-3 texture of extent (_E0, _E1, _E2) with the given
///     bits per scalar element, on accelerator view _Av, copying
///     _Src_byte_size bytes of initial data from _Source.
/// </summary>
texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture<value_type, 3>.");
    _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1912 
/// <summary>
///     Construct a rank-3 texture of extent (_E0, _E1, _E2) with the given
///     bits per scalar element, on accelerator view _Av with an associated
///     accelerator view _Associated_av, copying _Src_byte_size bytes of
///     initial data from _Source.
/// </summary>
texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
    : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2))
{
    static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture<value_type, 3>.");
    _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
1952 
1960  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
1961  {
1962  _Initialize(_Src.accelerator_view, _Src);
1963  }
1964 
1972  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
1973  {
1974  _Initialize(_Src.accelerator_view, _Src);
1975  }
1976 
1987  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
1988  {
1989  _Initialize(_Acc_view, _Src);
1990  }
1991 
2002  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2003  {
2004  _Initialize(_Acc_view, _Src);
2005  }
2006 
2021  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2022  {
2023  _Initialize(_Acc_view, _Associated_av, _Src);
2024  }
2025 
2040  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2041  {
2042  _Initialize(_Acc_view, _Associated_av, _Src);
2043  }
2044 
2052  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2053  {
2054  _Initialize(_Src.accelerator_view, _Src.associated_accelerator_view, _Src);
2055  }
2056 
/// <summary>
///     Move constructor. Delegates to the move-assignment operator, which
///     takes over _Other's texture descriptor and nulls out _Other's pointers.
/// </summary>
texture(texture && _Other)
{
    *this = std::move(_Other);
}
2067 
2078  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2079  {
2080  _Initialize(_Av, _Src);
2081  }
2082 
2097  : _Texture_base(_Src.extent, _Src.get_mipmap_levels())
2098  {
2099  _Initialize(_Av, _Associated_av, _Src);
2100  }
2101 
2111  texture& operator=(const texture & _Other)
2112  {
2113  if (this != &_Other)
2114  {
2115  _M_extent = _Other._M_extent;
2117  _Initialize(_Other.accelerator_view, _Other.associated_accelerator_view, _Other);
2118  }
2119  return *this;
2120  }
2121 
2132  {
2133  if (this != &_Other)
2134  {
2135  _M_extent = _Other._M_extent;
2136  _M_texture_descriptor = _Other._M_texture_descriptor;
2137 
2138  _Other._M_texture_descriptor._M_data_ptr = NULL;
2139  _Other._M_texture_descriptor._Set_texture_ptr(NULL);
2140  }
2141  return *this;
2142  }
2143 
2150  void copy_to(texture & _Dest) const
2151  {
2152  if (this->extent != _Dest.extent)
2153  {
2154  throw runtime_exception("The source and destination textures must have the exactly the same extent.", E_INVALIDARG);
2155  }
2156 
2159  this->get_data_length());
2160 
2161  _Texture_base::_Copy_to(_Dest);
2162 
2164  }
2165 
2172 #pragma warning( push )
2173 #pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
2175  {
2176  if (this->extent != _Dest.extent)
2177  {
2178  throw runtime_exception("The source and destination textures must have the exactly the same extent.", E_INVALIDARG);
2179  }
2180 
2183  this->get_data_length());
2184 
2185  _Texture_base::_Copy_to(_Dest);
2186 
2188  }
2189 
/// <summary>
///     Destructor. Intentionally empty: resource release is handled by the
///     members'/base's destructors.
/// </summary>
~texture() __CPU_ONLY
{
}
2196 
/// <summary>
///     Read the element at _Index from mip level 0. Restricted to GPU
///     (amp-restricted) code.
/// </summary>
const value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
{
    value_type _Tmp;
    // Reads through the texture intrinsic helper into a local temporary.
    _Texture_read_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
    return _Tmp;
}
2212 
2222  const value_type operator[] (int _I0) const __GPU_ONLY
2223  {
2224  static_assert(_Rank == 1, "value_type texture::operator[](int) is only permissible on texture<value_type, 1>.");
2225  return (*this)[index<1>(_I0)];
2226  }
2227 
2237  const value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
2238  {
2239  return (*this)[_Index];
2240  }
2241 
2251  const value_type operator() (int _I0) const __GPU_ONLY
2252  {
2253  static_assert(_Rank == 1, "value_type texture::operator()(int) is only permissible on texture<value_type, 1>.");
2254  return (*this)[index<1>(_I0)];
2255  }
2256 
2269  const value_type operator() (int _I0, int _I1) const __GPU_ONLY
2270  {
2271  static_assert(_Rank == 2, "value_type texture::operator()(int, int) is only permissible on texture<value_type, 2>.");
2272  return (*this)[index<2>(_I0, _I1)];
2273  }
2274 
2290  const value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
2291  {
2292  static_assert(_Rank == 3, "value_type texture::operator()(int, int, int) is only permissible on texture<value_type, 3>.");
2293  return (*this)[index<3>(_I0, _I1, _I2)];
2294  }
2295 
2305  const value_type get(const index<_Rank>& _Index) const __GPU_ONLY
2306  {
2307  return (*this)[_Index];
2308  }
2309 
2319  void set(const index<_Rank>& _Index, const value_type& _Value) __GPU_ONLY
2320  {
2321  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for set method.");
2322  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for set method.");
2323  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for set method.");
2325  }
2326 
/// <summary>
///     Raw pointer to the texture's host-side data (CPU only).
///     NOTE(review): presumably only meaningful for staging textures, as with
///     row_pitch/depth_pitch below — confirm against _Get_host_ptr.
/// </summary>
_Ret_ void* data() __CPU_ONLY
{
    return _Get_texture()->_Get_host_ptr();
}
2334 
/// <summary>
///     Const overload of data(): raw pointer to the texture's host-side data
///     (CPU only).
/// </summary>
const void* data() const __CPU_ONLY
{
    return _Get_texture()->_Get_host_ptr();
}
2342 
/// <summary>
///     Row pitch, in bytes, of the underlying staging texture. Only valid for
///     staging textures of rank 2 or higher; throws runtime_exception for
///     non-staging textures.
/// </summary>
__declspec(property(get=get_row_pitch)) unsigned int row_pitch;
unsigned int get_row_pitch() const __CPU_ONLY
{
    static_assert(_Rank >= 2, "row_pitch is only applicable to staging textures with rank 2 or higher.");

    // Staging-ness is a runtime property of the underlying _Texture, so it
    // cannot be checked at compile time like the rank.
    if (!_Get_texture()->_Is_staging()) {
        throw runtime_exception("row_pitch is only applicable to staging textures.", E_INVALIDARG);
    }

    return static_cast<unsigned int>(_Get_texture()->_Get_row_pitch());
}
2358 
/// <summary>
///     Depth-slice pitch, in bytes, of the underlying staging texture. Only
///     valid for rank-3 staging textures; throws runtime_exception for
///     non-staging textures.
/// </summary>
__declspec(property(get=get_depth_pitch)) unsigned int depth_pitch;
unsigned int get_depth_pitch() const __CPU_ONLY
{
    static_assert(_Rank == 3, "depth_pitch is only applicable to staging textures with rank 3.");

    // Staging-ness is a runtime property of the underlying _Texture, so it
    // cannot be checked at compile time like the rank.
    if (!_Get_texture()->_Is_staging()) {
        throw runtime_exception("depth_pitch is only applicable to staging textures.", E_INVALIDARG);
    }

    return static_cast<unsigned int>(_Get_texture()->_Get_depth_pitch());
}
2374 
2378  __declspec(property(get=get_associated_accelerator_view)) Concurrency::accelerator_view associated_accelerator_view;
2380  {
2381  return _Get_texture()->_Get_accelerator_view();
2382  }
2383 
2384 private:
2385  // Private constructor used by make_texture to create a texture from D3D texture
2386  texture(const Concurrency::extent<_Rank> & _Ext, const _Texture_descriptor & _Descriptor)
2387  : details::_Texture_base<_Value_type, _Rank>(_Ext, _Descriptor)
2388  {
2389  }
2390 
2392  {
2393  return (_Is_cpu_accelerator(_Av.accelerator) && !_Is_cpu_accelerator(_Associated_av.accelerator));
2394  }
2395 
2396  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
2397  {
2398  if (_Bits_per_scalar_element != 8 && _Bits_per_scalar_element != 16 &&
2399  _Bits_per_scalar_element != 32 && _Bits_per_scalar_element != 64)
2400  {
2401  throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 8, 16, 32, or 64.", E_INVALIDARG);
2402  }
2403 
2404  // special cases for 64 and for double based textures
2405 
2406 #pragma warning( push )
2407 #pragma warning( disable : 4127 ) // conditional expression is constant
2408  if (_Bits_per_scalar_element == 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type)
2409  {
2410  throw runtime_exception("Invalid _Bits_per_scalar_element argument - 64 is only valid for texture of double based short vector types.", E_INVALIDARG);
2411  }
2412 
2413  if (_Bits_per_scalar_element != 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type)
2414  {
2415  throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 64 for texture of double based short vector types.", E_INVALIDARG);
2416  }
2417 
2419 
2420  // the rest of the check is done by _Texture::_Create_texture, it depends on the underlying supported DXGI formats.
2421 
2422  unsigned int _Bits_per_channel = _Bits_per_scalar_element;
2423 
2425  {
2427  }
2428 
2429  std::array<size_t, 3> _Dimensions = Concurrency::graphics::details::_Get_dimensions(_M_extent, /*_Mip_offset=*/0);
2430 
2431  // release the old texture first before allocating new one to avoid the chance on hitting OOM
2433  _Texture_ptr _Tex_ptr = NULL;
2434 
2435  // See if we need to allocate a staging texture
2436  if (_Should_create_staging_texture(_Av, _Associated_av)) {
2437 
2439  {
2440  throw runtime_exception("Creating staging textures with mipmap levels > 1 is not supported", E_INVALIDARG);
2441  }
2442 
2443  _Tex_ptr = _Texture::_Create_stage_texture(
2444  _Associated_av, _Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], _M_texture_descriptor._Get_view_mipmap_levels(),
2447  _Bits_per_channel);
2448 
2449  // Now map the texture
2450  _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
2451  }
2452  else {
2453  _Tex_ptr = _Texture::_Create_texture(_Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], _M_texture_descriptor._Get_view_mipmap_levels(),
2456  _Bits_per_channel);
2457  }
2458 
2460 #pragma warning( pop )
2461  }
2462 
// Single-accelerator-view overload: delegates to the two-view overload with
// _Av used for both, so no staging texture is created.
void _Initialize(const Concurrency::accelerator_view& _Av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
{
    _Initialize(_Av, _Av, _Bits_per_scalar_element);
}
2467 
// Allocates with the default bits-per-scalar-element derived from the
// texture's value_type.
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY
{
    _Initialize(_Av, _Associated_av, Concurrency::graphics::details::_Get_default_bits_per_scalar_element<_Value_type>());
}
2472 
// Single-view, default-format overload: same view for both roles.
void _Initialize(const Concurrency::accelerator_view& _Av) __CPU_ONLY
{
    _Initialize(_Av, _Av);
}
2477 
2478  template<typename _Input_iterator>
2479  void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
2480  {
2481  _Initialize(_Av, _Associated_av);
2482 
2485  this->get_data_length());
2486 
2487  Concurrency::graphics::details::_Copy_async_impl(_Src_first, _Src_last, *this, index<_Rank>(), this->extent)._Get();
2488 
2490  }
2491 
// Single-view iterator overload: delegates to the two-view iterator overload
// with _Av used for both roles.
template<typename _Input_iterator>
void _Initialize(const Concurrency::accelerator_view& _Av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
{
    _Initialize(_Av, _Av, _Src_first, _Src_last);
}
2497 
// Allocates the texture with an explicit bits-per-scalar-element, then copies
// _Src_byte_size bytes of initial data from host memory.
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
{
    _Initialize(_Av, _Associated_av, _Bits_per_scalar_element);
    Concurrency::graphics::copy(_Source, _Src_byte_size, *this);
}
2503 
// Single-view overload of the host-data initializer.
void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
{
    _Initialize(_Av, _Av, _Source, _Src_byte_size, _Bits_per_scalar_element);
}
2508 
// Allocates the texture with the value_type's default format, then copies
// _Src_byte_size bytes of initial data from host memory.
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY
{
    _Initialize(_Av, _Associated_av);
    Concurrency::graphics::copy(_Source, _Src_byte_size, *this);
}
2514 
// Single-view, default-format overload of the host-data initializer.
void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY
{
    _Initialize(_Av, _Av, _Source, _Src_byte_size);
}
2519 
2521  {
2522  if (_Src.bits_per_scalar_element != 0) // _Src is not created via interop
2523  {
2524  _Initialize(_Av, _Associated_av, _Src.bits_per_scalar_element);
2525  }
2526  else // _Src is created via interop, create a new texture with the same properties as the existing one.
2527  {
2528  _Texture_ptr _New_tex;
2529  if (_Should_create_staging_texture(_Av, _Associated_av))
2530  {
2531  _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Associated_av, _Av);
2532  }
2533  else
2534  {
2535  _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Av, _Associated_av);
2536  }
2538  }
2539 
2542  this->get_data_length());
2543 
2545 
2547  }
2548 
2550  {
2551  _Initialize(_Av, _Av, _Src);
2552  }
2553 };
2554 
2564 #pragma warning( push )
2565 #pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
2566 template <typename _Value_type, int _Rank> class __declspec(deprecated("writeonly_texture_view is deprecated. Please use texture_view instead.")) writeonly_texture_view : public details::_Texture_base<_Value_type, _Rank>
2567 {
2568  static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for writeonly_texture_view.");
2569 
2570 public:
2578  : _Texture_base(_Src, /*_Most_detailed_mipmap_level=*/0, /*_View_mipmap_levels=*/1)
2579  {
2580  _Texture* _Tex = _Get_texture();
2581  if ((_Tex->_Get_num_channels() == 3) && (_Tex->_Get_bits_per_channel() == 32)) {
2582  throw runtime_exception("writeonly_texture_view cannot be created from a 3-channel texture with 32 bits per scalar element.", E_INVALIDARG);
2583  }
2584  if (_Tex->_Is_staging()) {
2585  throw runtime_exception("writeonly_texture_view cannot be created from a staging texture object.", E_INVALIDARG);
2586  }
2587  }
2588 
2596  : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true)
2597  {
2598  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for the constructor.");
2599  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for the constructor.");
2600  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for the constructor.");
2601  }
2602 
2610  : _Texture_base(_Src)
2611  {
2612  }
2613 
2621  {
2622  if (this != &_Other)
2623  {
2624  _M_extent = _Other._M_extent;
2625  _M_texture_descriptor = _Other._M_texture_descriptor;
2626  }
2627  return *this;
2628  }
2629 
2633  ~writeonly_texture_view() __GPU
2634  {
2635  }
2636 
2646  void set(const index<_Rank>& _Index, const value_type& _Value) const __GPU_ONLY
2647  {
2648  _Texture_write_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Value, _Index);
2649  }
2650 };
2651 #pragma warning( pop )
2652 
2664 template <typename _Value_type, int _Rank> class texture_view : public details::_Texture_base<_Value_type, _Rank>
2665 {
2666  friend class texture_view<const _Value_type, _Rank>;
2667 
2668 public:
2679  texture_view(texture<_Value_type, _Rank>& _Src, unsigned int _Mipmap_level = 0) __CPU_ONLY
2680  : _Texture_base(_Src, _Mipmap_level, /*_View_mipmap_levels=*/1)
2681  {
2682  if (_Get_texture()->_Is_staging()) {
2683  throw runtime_exception("texture_view cannot be created from a staging texture object.", E_INVALIDARG);
2684  }
2685  }
2686 
2694  : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true)
2695  {
2696  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "writable texture_view can only be created from a single-component texture on an accelerator.");
2697  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "writable texture_view cannot be created from a unorm texture on an accelerator.");
2698  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "writable texture_view cannot be created from a norm texture on an accelerator.");
2699  }
2700 
2708  : _Texture_base(_Other)
2709  {
2710  }
2711 
2719  {
2720  if (this != &_Other)
2721  {
2722  _M_extent = _Other._M_extent;
2723  _M_texture_descriptor = _Other._M_texture_descriptor;
2724  }
2725  return *this;
2726  }
2727 
2731  ~texture_view() __GPU
2732  {
2733  }
2734 
2744  const value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
2745  {
2746  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Read is only permissible on single-component writable texture_view.");
2747  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Read is not permissible on a writable unorm texture_view.");
2748  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Read is not permissible on a writable norm texture_view.");
2749 
2750  value_type _Tmp;
2751  _Texture_read_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
2752  return _Tmp;
2753  }
2754 
2764  const value_type operator[] (int _I0) const __GPU_ONLY
2765  {
2766  static_assert(_Rank == 1, "const value_type operator[](int) is only permissible on texture_view<value_type, 1>.");
2767  return (*this)[index<1>(_I0)];
2768  }
2769 
2779  const value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
2780  {
2781  return (*this)[_Index];
2782  }
2783 
2793  const value_type operator() (int _I0) const __GPU_ONLY
2794  {
2795  static_assert(_Rank == 1, "const value_type operator()(int) is only permissible on texture_view<value_type, 1>.");
2796  return (*this)[index<1>(_I0)];
2797  }
2798 
2811  const value_type operator() (int _I0, int _I1) const __GPU_ONLY
2812  {
2813  static_assert(_Rank == 2, "const value_type operator()(int, int) is only permissible on texture_view<value_type, 2>.");
2814  return (*this)[index<2>(_I0, _I1)];
2815  }
2816 
2832  const value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
2833  {
2834  static_assert(_Rank == 3, "const value_type operator()(int, int, int) is only permissible on texture_view<value_type, 3>.");
2835  return (*this)[index<3>(_I0, _I1, _I2)];
2836  }
2837 
2847  const value_type get(const index<_Rank>& _Index) const __GPU_ONLY
2848  {
2849  return (*this)[_Index];
2850  }
2851 
2861  void set(const index<_Rank>& _Index, const value_type& _Value) const __GPU_ONLY
2862  {
2864  }
2865 };
2866 
2871 {
2874  filter_unknown = 0xFFFFFFFF,
2875 };
2876 
2881 {
2886  address_unknown = 0xFFFFFFFF,
2887 };
2888 
2892 class sampler
2893 {
2894  friend sampler direct3d::make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY;
2896 
2897  template <typename _Value_type, int _Rank>
2898  friend class texture_view;
2899 
2900 public:
2905  sampler() __CPU_ONLY
2908  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
2909  {
2910  _Initialize();
2911  }
2912 
2920  sampler(filter_mode _Filter_mode)__CPU_ONLY
2921  : _M_filter_mode(_Filter_mode),
2923  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
2924  {
2925  _Initialize();
2926  }
2927 
2938  sampler(address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
2940  _M_address_mode(_Address_mode),
2941  _M_border_color(_Border_color)
2942  {
2943  _Initialize();
2944  }
2945 
2959  sampler(filter_mode _Filter_mode, address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
2960  : _M_filter_mode(_Filter_mode),
2961  _M_address_mode(_Address_mode),
2962  _M_border_color(_Border_color)
2963  {
2964  _Initialize();
2965  }
2966 
2973  sampler(const sampler& _Other) __GPU
2974  : _M_filter_mode(_Other._M_filter_mode),
2975  _M_address_mode(_Other._M_address_mode),
2976  _M_border_color(_Other._M_border_color),
2977  _M_sampler_descriptor(_Other._M_sampler_descriptor)
2978  {
2979  }
2980 
2987  sampler(sampler &&_Other) __GPU
2988  : _M_filter_mode(_Other._M_filter_mode),
2992  {
2993  _Other._M_sampler_descriptor._M_data_ptr = NULL;
2994  _Other._M_sampler_descriptor._Set_sampler_ptr(NULL);
2995  }
2996 
3006  sampler& operator=(const sampler& _Other) __GPU
3007  {
3008  if (this != &_Other)
3009  {
3010  _M_filter_mode = _Other._M_filter_mode;
3011  _M_address_mode = _Other._M_address_mode;
3012  _M_border_color = _Other._M_border_color;
3013  _M_sampler_descriptor = _Other._M_sampler_descriptor;
3014  }
3015  return *this;
3016  }
3017 
3027  sampler& operator=(sampler&& _Other) __GPU
3028  {
3029  if (this != &_Other)
3030  {
3031  _M_filter_mode = _Other._M_filter_mode;
3032  _M_address_mode = _Other._M_address_mode;
3033  _M_border_color = _Other._M_border_color;
3034  _M_sampler_descriptor = _Other._M_sampler_descriptor;
3035  _Other._M_sampler_descriptor._M_data_ptr = NULL;
3036  _Other._M_sampler_descriptor._Set_sampler_ptr(NULL);
3037  }
3038  return *this;
3039  }
3040 
3044  __declspec(property(get=get_filter_mode)) Concurrency::graphics::filter_mode filter_mode;
3045  Concurrency::graphics::filter_mode get_filter_mode() const __GPU
3046  {
3047  return _M_filter_mode;
3048  }
3049 
3054  Concurrency::graphics::address_mode get_address_mode() const __GPU
3055  {
3056  return _M_address_mode;
3057  }
3058 
3062  __declspec(property(get=get_border_color)) Concurrency::graphics::float_4 border_color;
3063  Concurrency::graphics::float_4 get_border_color() const __GPU
3064  {
3065  return _M_border_color;
3066  }
3067 
3068 private:
3069  // internal storage abstraction
3071 
3072  // a private constructor to be used for constructing a sampler via interop.
3073  sampler(const _Sampler_descriptor & _Descriptor) __CPU_ONLY
3074  : _M_sampler_descriptor(_Descriptor),
3077  _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f))
3078  {
3079  // Although we could query border value from the adopted sampler, but it's not that useful
3080  // given that this is the only thing that we could query and when the address mode is not
3081  // address_border, border value is not relevant.
3082  }
3083 
3084  _Ret_ _Sampler* _Get_sampler_ptr() const __CPU_ONLY
3085  {
3087  }
3088 
3089  void _Initialize() __CPU_ONLY
3090  {
3091  // Check if the given filter_mode and address_mode are valid C++ AMP ones
3095  {
3096  throw runtime_exception("Invalid sampler configuration", E_INVALIDARG);
3097  }
3098 
3099  _Sampler_ptr samplerPtr = _Sampler::_Create(_M_filter_mode, _M_address_mode,
3102  }
3103 
3104  const _Sampler_descriptor & _Get_descriptor() const __GPU_ONLY
3105  {
3106  return _M_sampler_descriptor;
3107  }
3108 
3109  _Sampler_descriptor _M_sampler_descriptor;
3113 };
3114 
3124 template <typename _Value_type, int _Rank> class texture_view<const _Value_type, _Rank> : public details::_Texture_base<_Value_type, _Rank>
3125 {
3126 public:
3127  typedef typename const _Value_type value_type;
3130 
3138  : _Texture_base(_Src)
3139  {
3140  // only on the gpu it is not allowed
3141  static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels != 1, "Read-only texture_view cannot be created from single-component textures on an accelerator.");
3142  }
3143 
3151  : _Texture_base(_Src)
3152  {
3153  if (_Get_texture()->_Is_staging()) {
3154  throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG);
3155  }
3156  }
3157 
3170  texture_view(const texture<_Value_type, _Rank>& _Src, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
3171  : _Texture_base(_Src, _Most_detailed_mip, _Mip_levels)
3172  {
3173  if (_Get_texture()->_Is_staging()) {
3174  throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG);
3175  }
3176  }
3177 
3185  : _Texture_base(_Other)
3186  {
3187  }
3188 
3196  : _Texture_base(_Other)
3197  {
3198  }
3199 
3214  texture_view(const texture_view<const _Value_type, _Rank>& _Other, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
3215  : _Texture_base(_Other, _Most_detailed_mip, _Mip_levels)
3216  {
3217  }
3218 
3226  {
3227  if (this != &_Other)
3228  {
3229  _M_extent = _Other._M_extent;
3230  _M_texture_descriptor = _Other._M_texture_descriptor;
3231  }
3232  return *this;
3233  }
3234 
3243  {
3244  _M_extent = _Other._M_extent;
3245  _M_texture_descriptor = _Other._M_texture_descriptor;
3246  return *this;
3247  }
3248 
3252  ~texture_view() __GPU
3253  {
3254  }
3255 
3265  value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY
3266  {
3267  _Value_type _Tmp;
3268  _Texture_read_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0);
3269  return _Tmp;
3270  }
3271 
3281  value_type operator[] (int _I0) const __GPU_ONLY
3282  {
3283  static_assert(_Rank == 1, "value_type operator[](int) is only permissible on texture_view<value_type, 1>.");
3284  return (*this)[index<1>(_I0)];
3285  }
3286 
3296  value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY
3297  {
3298  return (*this)[_Index];
3299  }
3300 
3310  value_type operator() (int _I0) const __GPU_ONLY
3311  {
3312  static_assert(_Rank == 1, "value_type texture_view::operator()(int) is only permissible on texture_view<value_type, 1>.");
3313  return (*this)[index<1>(_I0)];
3314  }
3315 
3328  value_type operator() (int _I0, int _I1) const __GPU_ONLY
3329  {
3330  static_assert(_Rank == 2, "value_type texture_view::operator()(int, int) is only permissible on texture_view<value_type, 2>.");
3331  return (*this)[index<2>(_I0, _I1)];
3332  }
3333 
3349  value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY
3350  {
3351  static_assert(_Rank == 3, "value_type texture_view::operator()(int, int, int) is only permissible on texture_view<value_type, 3>.");
3352  return (*this)[index<3>(_I0, _I1, _I2)];
3353  }
3354 
3368  value_type get(const index<_Rank>& _Index, unsigned int _Mip_level = 0) const __GPU_ONLY
3369  {
3370  _Value_type _Tmp;
3371  _Texture_read_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, _Mip_level);
3372  return _Tmp;
3373  }
3374 
3391  value_type sample(const sampler& _Sampler, const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY
3392  {
3393  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type.");
3394  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type.");
3395  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type.");
3396 
3397  _Value_type _Tmp;
3398  _Texture_sample_helper<coordinates_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, 4 /*Sampling*/, _Level_of_detail);
3399  return _Tmp;
3400  }
3401 
3421  template<filter_mode _Filter_mode = filter_linear, address_mode _Address_mode = address_clamp>
3422  value_type sample(const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY
3423  {
3424  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type.");
3425  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type.");
3426  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type.");
3427  static_assert((_Filter_mode == filter_point || _Filter_mode == filter_linear), "Invalid filter mode for sample method.");
3428  static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border),
3429  "Invalid address mode for sample method.");
3430 
3431  _Value_type _Tmp;
3432  // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution!
3433  _Texture_predefined_sample_helper<coordinates_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, _Filter_mode << 16 |_Address_mode, 4 /*Sampling*/, _Level_of_detail);
3434  return _Tmp;
3435  }
3436 
3449  const gather_return_type gather_red(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3450  {
3451  return _Gather(_Sampler, _Coord, 0);
3452  }
3453 
3466  const gather_return_type gather_green(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3467  {
3468  static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type.");
3469 
3470  return _Gather(_Sampler, _Coord, 1);
3471  }
3472 
3485  const gather_return_type gather_blue(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3486  {
3487  static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type.");
3488 
3489  return _Gather(_Sampler, _Coord, 2);
3490  }
3491 
3504  const gather_return_type gather_alpha(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY
3505  {
3506  static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type.");
3507 
3508  return _Gather(_Sampler, _Coord, 3);
3509  }
3510 
3523  template<address_mode _Address_mode = address_clamp>
3524  const gather_return_type gather_red(const coordinates_type& _Coord) const __GPU_ONLY
3525  {
3526  return _Gather<_Address_mode>(_Coord, 0);
3527  }
3528 
3541  template<address_mode _Address_mode = address_clamp>
3542  const gather_return_type gather_green(const coordinates_type& _Coord) const __GPU_ONLY
3543  {
3544  static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type.");
3545 
3546  return _Gather<_Address_mode>(_Coord, 1);
3547  }
3548 
3561  template<address_mode _Address_mode = address_clamp>
3562  const gather_return_type gather_blue(const coordinates_type& _Coord) const __GPU_ONLY
3563  {
3564  static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type.");
3565 
3566  return _Gather<_Address_mode>(_Coord, 2);
3567  }
3568 
3581  template<address_mode _Address_mode = address_clamp>
3582  const gather_return_type gather_alpha(const coordinates_type& _Coord) const __GPU_ONLY
3583  {
3584  static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type.");
3585 
3586  return _Gather<_Address_mode>(_Coord, 3);
3587  }
3588 
3589 private:
3590  const gather_return_type _Gather(const sampler& _Sampler, const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY
3591  {
3592  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type.");
3593  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type.");
3594  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type.");
3595  static_assert(rank == 2, "gather methods are only permissible on texture_view<value_type, 2>.");
3596 
3597  gather_return_type _Tmp;
3598  _Texture_sample_helper<coordinates_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, _Component, /*_Level_of_detail=*/0.0f);
3599  return _Tmp;
3600  }
3601 
3602  template<address_mode _Address_mode>
3603  const gather_return_type _Gather(const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY
3604  {
3605  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type.");
3606  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type.");
3607  static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type.");
3608  static_assert(rank == 2, "gather methods are only permissible on texture_view<value_type, 2>.");
3609  static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border),
3610  "Invalid address mode for gather methods.");
3611 
3612  gather_return_type _Tmp;
3613  // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution!
3614  // gather only used the address_mode of the sampler, internally we use filter_point so that the predefined sampler id scheme is same for both sample and gather.
3615  _Texture_predefined_sample_helper<coordinates_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, filter_point << 16 |_Address_mode, _Component, /*_Level_of_detail=*/0.0f);
3616  return _Tmp;
3617  }
3618 };
3619 
3620 namespace details
3621 {
3622 
3623 template <typename T>
3625 {
3626  static const bool is_texture = false;
3627  static const bool is_writable = false;
3628 };
3629 
3630 template <typename _Value_type, int _Rank>
3631 struct texture_traits<texture<_Value_type, _Rank>>
3632 {
3633  static const bool is_texture = true;
3634  static const bool is_writable = true;
3635 };
3636 
3637 template <typename _Value_type, int _Rank>
3638 struct texture_traits<const texture<_Value_type, _Rank>>
3639 {
3640  static const bool is_texture = true;
3641  static const bool is_writable = false;
3642 };
3643 
3644 template <typename _Value_type, int _Rank>
3645 struct texture_traits<writeonly_texture_view<_Value_type, _Rank>>
3646 {
3647  static const bool is_texture = true;
3648  static const bool is_writable = true;
3649 };
3650 
3651 template <typename _Value_type, int _Rank>
3652 struct texture_traits<const writeonly_texture_view<_Value_type, _Rank>>
3653 {
3654  static const bool is_texture = true;
3655  static const bool is_writable = true;
3656 };
3657 
3658 template <typename _Value_type, int _Rank>
3659 struct texture_traits<texture_view<_Value_type, _Rank>>
3660 {
3661  static const bool is_texture = true;
3662  static const bool is_writable = true;
3663 };
3664 
3665 template <typename _Value_type, int _Rank>
3666 struct texture_traits<texture_view<const _Value_type, _Rank>>
3667 {
3668  static const bool is_texture = true;
3669  static const bool is_writable = false;
3670 };
3671 
3672 template <typename _Value_type, int _Rank>
3673 struct texture_traits<const texture_view<const _Value_type, _Rank>>
3674 {
3675  static const bool is_texture = true;
3676  static const bool is_writable = false;
3677 };
3678 
3679 template <typename _Value_type, int _Rank>
3680 struct texture_traits<const texture_view<_Value_type, _Rank>>
3681 {
3682  static const bool is_texture = true;
3683  static const bool is_writable = true;
3684 };
3685 
3686 // The helper function used by ETW and copy functions to calculate number of bytes for the copy operation given input section
// Helper used by ETW tracing and the copy functions: computes the number of
// bytes covered by the section _Extent of the texture underlying _Tex,
// accounting for the view's mipmap range.
template <typename _Value_type, int _Rank>
unsigned int _Get_section_size(const _Texture_base<_Value_type, _Rank> &_Tex, const extent<_Rank> &_Extent)
{
    _Texture* _Tex_ptr = _Get_texture(_Tex);
    _Texture_descriptor _Tex_desc = _Get_texture_descriptor(_Tex);

    return _Tex_ptr->_Get_data_length(_Tex_desc._Get_most_detailed_mipmap_level(), _Tex_desc._Get_view_mipmap_levels(), _Get_dimensions(_Extent, /*_Mip_offset=*/0).data());
}
3695 
3696 template <typename _Input_iterator, typename _Value_type>
3697 _Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last,
3698  _In_ _Texture *_Dst, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level,
3699  const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent = NULL)
3700 {
3701  _ASSERTE(_Dst != nullptr);
3702  _ASSERTE(_Dst_offset != nullptr);
3703  _ASSERTE(_Copy_extent != nullptr);
3704 
3705  _ASSERTE((unsigned int)std::distance(_First, _Last) >= (_Copy_extent[0] * _Copy_extent[1] * _Copy_extent[2]));
3706 
3707  // The copy region should be within the bounds of the destination texture
3708  _ASSERTE((_Dst_offset[0] + _Copy_extent[0]) <= _Dst->_Get_width(_Dst_mipmap_level));
3709  _ASSERTE((_Dst_offset[1] + _Copy_extent[1]) <= _Dst->_Get_height(_Dst_mipmap_level));
3710  _ASSERTE((_Dst_offset[2] + _Copy_extent[2]) <= _Dst->_Get_depth(_Dst_mipmap_level));
3711 
3712 #pragma warning( push )
3713 #pragma warning( disable : 4127 ) // conditional expression is constant
3714  if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Dst->_Get_bits_per_element() != (8U * sizeof(_Value_type))))
3715  {
3716  throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG);
3717  }
3718 #pragma warning( pop )
3719 
3720  // If the dest is accessible on the host we can perform the copy entirely on the host
3721  if (_Dst->_Get_host_ptr() != NULL)
3722  {
3723  // We have made sure that the three multiplications below won't cause integer overflow when creating the texture
3724  _ASSERTE(((_Dst->_Get_bits_per_element() * _Copy_extent[0]) % (8U * sizeof(_Value_type))) == 0);
3725 
3726  size_t _Row_size = (_Dst->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes
3727  size_t _Depth_slice_size = _Row_size * _Copy_extent[1];
3728 
3729  size_t _Row_pitch = _Dst->_Get_row_pitch();
3730  size_t _Depth_pitch = _Dst->_Get_depth_pitch();
3731  _ASSERTE(_Row_pitch >= _Row_size);
3732  _ASSERTE(_Depth_pitch >= _Depth_slice_size);
3733 
3734  size_t _Dst_offset_in_bytes = ((_Dst_offset[0] * _Dst->_Get_bits_per_element()) >> 3) +
3735  (_Dst_offset[1] * _Row_pitch) + (_Dst_offset[2] * _Depth_pitch);
3736 
3737  unsigned char *_PDest = reinterpret_cast<unsigned char*>(_Dst->_Get_host_ptr()) + _Dst_offset_in_bytes;
3738 
3739  _Copy_data_on_host(_Dst->_Get_rank(), _First, reinterpret_cast<_Value_type*>(_PDest),
3740  _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2],
3741  _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type));
3742 
3743  return _Event();
3744  }
3745 
3746  // The dest is not accessbile on the host; we need to copy src to
3747  // a temporary staging texture and launch a copy from the staging texture
3748  // to the dest texture.
3749  _Event _Ev;
3750 
3751  // Determine the copy chunk extent
3752  std::array<size_t, 3> _Copy_chunk_extent;
3753  if (_Preferred_copy_chunk_extent != NULL)
3754  {
3755  std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin());
3756  }
3757  else
3758  {
3759  _Get_preferred_copy_chunk_extent(_Dst->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Dst->_Get_bits_per_element(), _Copy_chunk_extent.data());
3760  }
3761 
3762  std::array<size_t, 3> _Curr_copy_offset;
3763  std::copy(&_Dst_offset[0], &_Dst_offset[3], _Curr_copy_offset.begin());
3764 
3765  std::array<size_t, 3> _Remaining_copy_extent;
3766  std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin());
3767 
3768  bool _Truncated_copy = false;
3769  do
3770  {
3771  _Texture_ptr _Dst_staging_tex_ptr;
3772  std::array<size_t, 3> _Curr_copy_extent;
3773  _Truncated_copy = _Get_chunked_staging_texture(_Dst, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Dst_staging_tex_ptr);
3774 
3775 
3776  // Now copy from the src pointer to the temp staging texture
3777  _Dst_staging_tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
3778 
3779  std::array<size_t, 3> _Dst_staging_tex_offset;
3780  _Dst_staging_tex_offset.fill(0);
3781  _Event _Temp_ev = _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, _Dst_staging_tex_ptr,
3782  _Dst_staging_tex_offset.data(), /*_Dst_mipmap_level=*/0, _Curr_copy_extent.data(), _Copy_chunk_extent.data());
3783 
3784  // Now chain a copy from the temporary staging texture to the _Dst texture
3785  _Texture_ptr _Dst_tex_ptr = _Dst;
3786  _Temp_ev = _Temp_ev._Add_continuation(std::function<_Event()>([_Dst_staging_tex_ptr, _Dst_tex_ptr, _Curr_copy_extent,
3787  _Dst_staging_tex_offset, _Curr_copy_offset, _Dst_mipmap_level]() mutable -> _Event
3788  {
3789  return _Dst_staging_tex_ptr->_Copy_to_async(_Dst_tex_ptr, _Curr_copy_extent.data(), _Dst_staging_tex_offset.data(), _Curr_copy_offset.data(), /*_Src_mipmap_level=*/0, _Dst_mipmap_level);
3790  }));
3791 
3792  _Ev = _Ev._Add_event(_Temp_ev);
3793 
3794  // Now adjust the _Src and _Dst offsets for the remaining part of the copy
3795  if (_Truncated_copy)
3796  {
3797  // The offset only needs to be adjusted in the most significant dimension
3798  _Curr_copy_offset[_Dst->_Get_rank() - 1] += _Curr_copy_extent[_Dst->_Get_rank() - 1];
3799  std::advance(_First, (((_Curr_copy_extent[0] * _Dst->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]);
3800  }
3801 
3802  } while (_Truncated_copy);
3803 
3804  return _Ev;
3805 }
3806 
3807 template <typename _Output_iterator, typename _Value_type>
3808 _Event _Copy_async_impl(_Texture *_Tex, const size_t *_Tex_offset, unsigned int _Src_mipmap_level, const size_t *_Copy_extent, _Output_iterator _First, const size_t *_Preferred_copy_chunk_extent = NULL)
3809 {
3810  _ASSERTE(_Tex != nullptr);
3811  _ASSERTE(_Tex_offset != nullptr);
3812  _ASSERTE(_Copy_extent != nullptr);
3813 
3814  // The copy region should be within the bounds of the source texture
3815  _ASSERTE((_Tex_offset[0] + _Copy_extent[0]) <= _Tex->_Get_width(_Src_mipmap_level));
3816  _ASSERTE((_Tex_offset[1] + _Copy_extent[1]) <= _Tex->_Get_height(_Src_mipmap_level));
3817  _ASSERTE((_Tex_offset[2] + _Copy_extent[2]) <= _Tex->_Get_depth(_Src_mipmap_level));
3818 
3819 #pragma warning( push )
3820 #pragma warning( disable : 4127 ) // conditional expression is constant
3821  if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Tex->_Get_bits_per_element() != (8U * sizeof(_Value_type))))
3822  {
3823  throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG);
3824  }
3825 #pragma warning( pop )
3826 
3827  // If the texture is available on the host then we can perform the copy entirely on the host
3828  if (_Tex->_Get_host_ptr() != nullptr)
3829  {
3830  // We have made sure that the three multiplications below won't cause integer overflow when creating the texture
3831  _ASSERTE(((_Tex->_Get_bits_per_element() * _Copy_extent[0]) % 8U) == 0);
3832 
3833  size_t _Row_size = (_Tex->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes
3834  size_t _Depth_slice_size = _Row_size * _Copy_extent[1];
3835 
3836  size_t _Row_pitch = _Tex->_Get_row_pitch();
3837  size_t _Depth_pitch = _Tex->_Get_depth_pitch();
3838  _ASSERTE(_Row_pitch >= _Row_size);
3839  _ASSERTE(_Depth_pitch >= _Depth_slice_size);
3840 
3841  size_t _Tex_offset_in_bytes = ((_Tex_offset[0] * _Tex->_Get_bits_per_element()) >> 3) +
3842  (_Tex_offset[1] * _Row_pitch) + (_Tex_offset[2] * _Depth_pitch);
3843 
3844  unsigned char *_PTex = reinterpret_cast<unsigned char*>(_Tex->_Get_host_ptr()) + _Tex_offset_in_bytes;
3845 
3846  _Copy_data_on_host(_Tex->_Get_rank(), reinterpret_cast<_Value_type*>(_PTex), _First,
3847  _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2],
3848  _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type));
3849 
3850  return _Event();
3851  }
3852 
3853  // The texture is not accessbile on the host; we need to copy to/from a staging
3854  // texture before the copy to the destination. This is done in chunks, such that
3855  // we can concurrently copy from the source texture to a staging texture while
3856  // copying from a staging texture from a previous chunk to the destination.
3857  _Event _Ev;
3858 
3859  // Determine the copy chunk extent
3860  std::array<size_t, 3> _Copy_chunk_extent;
3861  if (_Preferred_copy_chunk_extent != nullptr)
3862  {
3863  std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin());
3864  }
3865  else
3866  {
3867  _Get_preferred_copy_chunk_extent(_Tex->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Tex->_Get_bits_per_element(), _Copy_chunk_extent.data());
3868  }
3869 
3870  std::array<size_t, 3> _Curr_copy_offset;
3871  std::copy(&_Tex_offset[0], &_Tex_offset[3], _Curr_copy_offset.begin());
3872 
3873  std::array<size_t, 3> _Remaining_copy_extent;
3874  std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin());
3875 
3876  bool _Truncated_copy = false;
3877 
3878  _Texture_ptr _Staging_tex_ptr;
3879  std::array<size_t, 3> _Curr_copy_extent;
3880  _Truncated_copy = _Get_chunked_staging_texture(_Tex, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Staging_tex_ptr);
3881 
3882  // Now copy into the temp staging texture
3883  std::array<size_t, 3> _Staging_tex_offset;
3884  _Staging_tex_offset.fill(0);
3885  _Event _Temp_ev = _Copy_async_impl(_Tex, _Curr_copy_offset.data(), _Src_mipmap_level,
3886  _Staging_tex_ptr._Get_ptr(), _Staging_tex_offset.data(), /*_Dst_mipmap_level=*/0,
3887  _Curr_copy_extent.data(), _Copy_chunk_extent.data());
3888  _Ev = _Ev._Add_event(_Temp_ev);
3889 
3890  // If we have finished our copy, we just need to add a continuation to copy
3891  // from the temporary staging texture to the _Dst pointer
3892  if (!_Truncated_copy)
3893  {
3894  return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr,
3895  _Curr_copy_extent, _Staging_tex_offset, _Copy_chunk_extent, _First]() mutable -> _Event
3896  {
3897  return _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data());
3898  }));
3899  }
3900  else
3901  {
3902  // The copy was truncated. We need to recursively perform the rest of the copy
3903  _Texture_ptr _Tex_ptr = _Tex;
3904  _Curr_copy_offset[_Tex->_Get_rank() - 1] += _Curr_copy_extent[_Tex->_Get_rank() - 1];
3905  return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr, _First, _Curr_copy_extent,
3906  _Staging_tex_offset, _Tex_ptr, _Curr_copy_offset, _Remaining_copy_extent, _Copy_chunk_extent, _Src_mipmap_level]() mutable -> _Event
3907  {
3908  // Initiate copying of the remaining portion
3909  _Output_iterator _New_dst_iter = _First;
3910  _Advance_output_iterator<decltype(_New_dst_iter), size_t>(_New_dst_iter, (((_Curr_copy_extent[0] * _Tex_ptr->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]);
3911  _Event _Ev1 = _Copy_async_impl<_Output_iterator, _Value_type>(_Tex_ptr, _Curr_copy_offset.data(), _Src_mipmap_level, _Remaining_copy_extent.data(), _New_dst_iter, _Copy_chunk_extent.data());
3912 
3913  // Now copy the data from the temp staging buffer to the _Dst pointer
3914  _Event _Ev2 = _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data());
3915 
3916  return _Ev2._Add_event(_Ev1);
3917  }));
3918  }
3919 }
3920 
// Asynchronously copies raw host memory (_Src, _Src_byte_size bytes) into the
// section [_Dst_offset, _Dst_offset + _Copy_extent) of _Dst. Validates the
// section, rejects multi-mipmap destinations, and checks the source buffer is
// large enough before delegating to the iterator-based overload.
template <typename _Value_type, int _Rank>
_Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent)
{
    _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);

    if (_Dst.get_mipmap_levels() > 1)
    {
        throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG);
    }

    if (_Src_byte_size < _Get_section_size(_Dst, _Copy_extent))
    {
        if (_Dst.extent == _Copy_extent)
        {
            throw runtime_exception("Invalid _Src_byte_size argument. _Src_byte_size is smaller than the total size of _Dst.", E_INVALIDARG);
        }
        else
        {
            throw runtime_exception("Invalid _Src_byte_size argument. _Src_byte_size is smaller than the provided section of _Dst.", E_INVALIDARG);
        }
    }

    _Texture *_Dst_tex_ptr = _Get_texture(_Dst);
    std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
    std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);
    auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast<const unsigned char*>(_Src));
    auto _Last = stdext::make_unchecked_array_iterator(reinterpret_cast<const unsigned char*>(_Src) + _Src_byte_size);

    return _Copy_async_impl<decltype(_First), unsigned char>(_First, _Last, _Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data());
}
3951 
// Asynchronously copies the section [_Src_offset, _Src_offset + _Copy_extent)
// of _Src into raw host memory (_Dst, _Dst_byte_size bytes). Validates the
// section, rejects multi-mipmap sources, and checks the destination buffer is
// large enough before delegating to the iterator-based overload.
template<typename _Value_type, int _Rank>
_Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size)
{
    _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);

    if (_Src.get_mipmap_levels() > 1)
    {
        throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG);
    }

    if (_Get_section_size(_Src, _Copy_extent) > _Dst_byte_size)
    {
        if (_Src.extent == _Copy_extent)
        {
            throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the size of _Src.", E_INVALIDARG);
        }
        else
        {
            throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the provided section of _Src.", E_INVALIDARG);
        }
    }

    _Texture *_Src_tex_ptr = _Get_texture(_Src);
    std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
    std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);

    auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast<unsigned char*>(_Dst));

    return _Copy_async_impl<decltype(_First), unsigned char>(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _First);
}
3982 
// Asynchronously copies the section [_Src_offset, _Src_offset + _Copy_extent)
// of _Src to the output iterator _Dest_iter. Validates the section and rejects
// multi-mipmap sources before delegating to the raw-texture overload.
template <typename _Output_iterator, typename _Value_type, int _Rank>
_Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Output_iterator _Dest_iter)
{
    _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);

    if (_Src.get_mipmap_levels() > 1)
    {
        throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG);
    }

    _Texture *_Src_tex_ptr = _Get_texture(_Src);
    std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
    std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);

    return _Copy_async_impl<_Output_iterator, _Value_type>(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _Dest_iter);
}
3999 
// Asynchronously copies [_First, _Last) into the section
// [_Dst_offset, _Dst_offset + _Copy_extent) of _Dst. Validates the section,
// checks the iterator range supplies enough elements, and rejects
// multi-mipmap destinations before delegating to the raw-texture overload.
template <typename _Input_iterator, typename _Value_type, int _Rank>
_Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent)
{
    _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);
    if (static_cast<unsigned int>(std::distance(_First, _Last)) < _Copy_extent.size())
    {
        throw runtime_exception("Inadequate amount of data supplied through the iterators", E_INVALIDARG);
    }

    if (_Dst.get_mipmap_levels() > 1)
    {
        throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG);
    }

    std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0);
    std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);

    _Texture *_Dst_tex_ptr = _Get_texture(_Dst);
    return _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, _Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data());
}
4020 
4021 template<typename _Value_type, int _Rank>
4023  const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset,
4024  const extent<_Rank> &_Copy_extent)
4025 {
4026  _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent);
4027  _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent);
4028 
4029  _Texture_descriptor _Src_tex_desc = _Get_texture_descriptor(_Src);
4030  _Texture_descriptor _Dst_tex_desc = _Get_texture_descriptor(_Dst);
4031 
4032  if (_Src_tex_desc._Get_view_mipmap_levels() != _Dst_tex_desc._Get_view_mipmap_levels())
4033  {
4034  throw runtime_exception("The source and destination textures must have the exactly the same number of mipmap levels for texture copy.", E_INVALIDARG);
4035  }
4036 
4037  bool _Is_whole_texture_copy = (_Src_offset == _Dst_offset && _Src_offset == index<_Rank>() && _Src.extent == _Dst.extent && _Src.extent == _Copy_extent);
4038 
4039  if (_Src_tex_desc._Get_view_mipmap_levels() > 1 && !_Is_whole_texture_copy)
4040  {
4041  throw runtime_exception("Sections are not allowed when copy involves multiple mipmap levels", E_INVALIDARG);
4042  }
4043 
4044  if (_Src_tex_desc._Are_mipmap_levels_overlapping(&_Dst_tex_desc))
4045  {
4046  throw runtime_exception("The source and destination are overlapping areas on the same texture", E_INVALIDARG);
4047  }
4048 
4049  _Texture* _Src_tex = _Get_texture(_Src);
4050  _Texture* _Dst_tex = _Get_texture(_Dst);
4051 
4052  // Formats must be identical for non-adopted textures. Textures created through D3D interop are not subject to this test
4053  // to allow copy between related, but not identical, formats. Attempting to copy between unrelated formats through interop
4054  // will result in exceptions in debug mode and undefined behavior in release mode.
4055  if (!_Src_tex->_Is_adopted() && !_Dst_tex->_Is_adopted() && (_Src_tex->_Get_texture_format() != _Dst_tex->_Get_texture_format()))
4056  {
4057  throw runtime_exception("The source and destination textures are not compatible.", E_INVALIDARG);
4058  }
4059 
4060  std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset);
4061  std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset);
4062 
4063  _Event _Copy_event;
4064 
4065  unsigned int _Src_most_detailed_mipmap_level = _Src_tex_desc._Get_most_detailed_mipmap_level();
4066  unsigned int _Dst_most_detailed_mipmap_level = _Dst_tex_desc._Get_most_detailed_mipmap_level();
4067 
4068  // Copy all mipmap levels from source to destination one by one.
4069  // Note that the offsets are not allowed therefore only dimensions need to be updated for subsequent mipmap levels
4070  for (unsigned int _Mip_offset = 0; _Mip_offset < _Src_tex_desc._Get_view_mipmap_levels(); ++_Mip_offset)
4071  {
4072  std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, _Mip_offset);
4073 
4074  auto _Step_event = _Copy_async_impl(_Src_tex, _Src_offset_arr.data(), _Src_most_detailed_mipmap_level + _Mip_offset,
4075  _Dst_tex, _Dst_offset_arr.data(), _Dst_most_detailed_mipmap_level + _Mip_offset,
4076  _Copy_extent_arr.data());
4077 
4078  _Copy_event = _Copy_event._Add_event(_Step_event);
4079  }
4080 
4081  return _Copy_event;
4082 }
4083 
4084 } // namespace details
4085 
/// Synchronously copies the whole contents of the source texture into the
/// destination host buffer of _Dst_byte_size bytes.
template <typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> void copy(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size)
{
    // NOTE(review): the statement opening this argument list (and the
    // matching end-of-span statement before the closing brace) was elided by
    // doc extraction — presumably a trace/span helper call. Restore from the
    // original header; this body does not compile as-is.
    nullptr,
    _Get_section_size(_Src, _Src.extent));

    // Launch the copy of the full extent starting at the zero index, then
    // block until it completes.
    details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size)._Get();

}
4114 
/// Synchronously copies the section of the source texture described by
/// _Src_offset/_Copy_extent into the destination host buffer.
template <typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size)
{
    // NOTE(review): the statement opening this argument list (and the
    // end-of-span statement before the closing brace) was elided by doc
    // extraction — presumably a trace/span helper call; restore before use.
    nullptr,
    _Get_section_size(_Src, _Copy_extent));

    // Launch the section copy and block until it completes.
    details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size)._Get();

}
4149 
4150 
/// Synchronously copies _Src_byte_size bytes from the source host buffer into
/// the whole extent of the destination texture.
template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): lines elided by doc extraction — the dangling argument
    // line below belongs to a trace/span helper call, and the matching
    // end-of-span statement before the closing brace is also missing.
    _Get_section_size(_Dst, _Dst.extent));

    // Copy into the full destination extent starting at the zero index and
    // wait for completion.
    details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get();

}
4180 
/// Synchronously copies _Src_byte_size bytes from the source host buffer into
/// the destination texture section described by _Dst_offset/_Copy_extent.
template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst,
    const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): lines elided by doc extraction — the dangling argument
    // line below belongs to a trace/span helper call, and the matching
    // end-of-span statement before the closing brace is also missing.
    _Get_section_size(_Dst, _Copy_extent));

    // Launch the section copy and block until it completes.
    details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent)._Get();

}
4214 
4215 
/// Asynchronously copies the whole source texture into the destination host
/// buffer; returns a completion_future for the pending copy.
template<typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size)
{
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above these dangling argument lines, and the return
    // statement that wraps _Ev into a completion_future at the end.
    nullptr,
    _Get_section_size(_Src, _Src.extent));

    // Start the copy without waiting; _Ev represents the pending operation.
    _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size);

}
4247 
/// Asynchronously copies a section of the source texture into the destination
/// host buffer; returns a completion_future for the pending copy.
template<typename _Src_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent,
    _Out_ void * _Dst, unsigned int _Dst_byte_size)
{
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above these dangling argument lines, and the return
    // statement that wraps _Ev into a completion_future at the end.
    nullptr,
    _Get_section_size(_Src, _Copy_extent));

    // Start the section copy without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size);

}
4283 
/// Asynchronously copies _Src_byte_size bytes from the source host buffer into
/// the whole destination texture; returns a completion_future.
template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above this dangling argument line, and the return statement
    // that wraps _Ev into a completion_future at the end.
    _Get_section_size(_Dst, _Dst.extent));

    // Start the copy into the full destination extent without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent);

}
4313 
/// Asynchronously copies _Src_byte_size bytes from the source host buffer into
/// the destination texture section; returns a completion_future.
template <typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst,
    const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above this dangling argument line, and the return statement
    // that wraps _Ev into a completion_future at the end.
    _Get_section_size(_Dst, _Copy_extent));

    // Start the section copy without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent);

}
4350 
/// Synchronously copies the iterator range [_First, _Last) into the whole
/// extent of the destination texture.
template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): lines elided by doc extraction — the dangling argument
    // line below belongs to a trace/span helper call, and the matching
    // end-of-span statement before the closing brace is also missing.
    _Get_section_size(_Dst, _Dst.extent));

    // Copy into the full destination extent starting at the zero index and
    // block until completion.
    details::_Copy_async_impl(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get();

}
4380 
/// Synchronously copies the iterator range [_First, _Last) into the
/// destination texture section described by _Dst_offset/_Copy_extent.
template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): lines elided by doc extraction — the dangling argument
    // line below belongs to a trace/span helper call, and the matching
    // end-of-span statement before the closing brace is also missing.
    _Get_section_size(_Dst, _Copy_extent));

    // Launch the section copy and block until it completes.
    details::_Copy_async_impl(_First, _Last, _Dst, _Dst_offset, _Copy_extent)._Get();

}
4416 
/// Synchronously copies the whole source texture to the output iterator _Dst.
/// SFINAE excludes texture types from the OutputIterator overload set.
template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, OutputIterator _Dst)
{
    // NOTE(review): the statement opening this argument list (and the
    // end-of-span statement before the closing brace) was elided by doc
    // extraction — presumably a trace/span helper call; restore before use.
    nullptr,
    _Get_section_size(_Src, _Src.extent));

    // Copy the full extent starting at the zero index and wait for completion.
    details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst)._Get();

}
4439 
/// Synchronously copies a section of the source texture to the output
/// iterator _Dst.
template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst)
{
    // NOTE(review): the statement opening this argument list (and the
    // end-of-span statement before the closing brace) was elided by doc
    // extraction — presumably a trace/span helper call; restore before use.
    nullptr,
    _Get_section_size(_Src, _Copy_extent));

    // Launch the section copy and block until it completes.
    details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst)._Get();

}
4468 
/// Synchronously performs a whole-texture copy from _Src to _Dst; both
/// textures must have identical extents.
template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, _Dst_type &_Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");

    if (_Src.extent != _Dst.extent)
    {
        throw runtime_exception("The source and destination textures must have the exactly the same extent for whole-texture copy.", E_INVALIDARG);
    }

    // NOTE(review): several lines elided by doc extraction — the trace/span
    // helper call that this dangling argument line belongs to, the
    // _Copy_async_impl(...)._Get() call that performs the copy, and the
    // end-of-span statement. Restore from the original header.
    _Get_section_size(_Dst, _Dst.extent));


}
4501 
/// Synchronously copies a section of _Src (at _Src_offset) into a section of
/// _Dst (at _Dst_offset), both of size _Copy_extent.
template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): lines elided by doc extraction — the trace/span helper
    // call that this dangling argument line belongs to, and the end-of-span
    // statement before the closing brace.
    _Get_section_size(_Src, _Copy_extent));

    // Launch the texture-to-texture section copy and wait for completion.
    details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent)._Get();

}
4537 
/// Asynchronously copies the iterator range [_First, _Last) into the whole
/// destination texture; returns a completion_future.
template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): elided by doc extraction — the trace/span helper call
    // that this dangling argument line belongs to, and the return statement
    // that wraps _Ev into a completion_future at the end.
    _Get_section_size(_Dst, _Dst.extent));

    // Start the copy without waiting; template arguments are spelled out to
    // select the iterator-based implementation.
    _Event _Ev = details::_Copy_async_impl<InputIterator, _Dst_type::value_type, _Dst_type::rank>(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent);

}
4570 
/// Asynchronously copies the iterator range [_First, _Last) into the
/// destination texture section; returns a completion_future.
template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst,
    const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");
    // NOTE(review): elided by doc extraction — the trace/span helper call
    // that this dangling argument line belongs to, and the return statement
    // that wraps _Ev into a completion_future at the end.
    _Get_section_size(_Dst, _Copy_extent));

    // Start the section copy without waiting.
    _Event _Ev = details::_Copy_async_impl<InputIterator, _Dst_type::value_type, _Dst_type::rank>(_First, _Last, _Dst, _Dst_offset, _Copy_extent);

}
4610 
/// Asynchronously copies the whole source texture to the output iterator
/// _Dst; returns a completion_future.
template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, OutputIterator _Dst)
{
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above these dangling argument lines, and the return
    // statement that wraps _Ev into a completion_future at the end.
    nullptr,
    _Get_section_size(_Src, _Src.extent));

    // Start the copy of the full extent without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst);

}
4636 
/// Asynchronously copies a section of the source texture to the output
/// iterator _Dst; returns a completion_future.
template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst)
{
    // NOTE(review): elided by doc extraction — the opening of a trace/span
    // helper call above these dangling argument lines, and the return
    // statement that wraps _Ev into a completion_future at the end.
    nullptr,
    _Get_section_size(_Src, _Copy_extent));

    // Start the section copy without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst);

}
4668 
/// Asynchronously performs a whole-texture copy from _Src to _Dst; the two
/// extents must match. Returns a completion_future.
template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, _Dst_type &_Dst)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");

    if (_Src.extent != _Dst.extent)
    {
        throw runtime_exception("The source and destination textures must have the exactly the same extent for whole-texture copy.", E_INVALIDARG);
    }
    // NOTE(review): several lines elided by doc extraction — the trace/span
    // helper call that this dangling argument line belongs to, the
    // _Copy_async_impl call producing the event, and the return statement
    // that wraps it into a completion_future. Restore from the original header.
    _Get_section_size(_Dst, _Dst.extent));


}
4703 
/// Asynchronously copies a section of _Src (at _Src_offset) into a section of
/// _Dst (at _Dst_offset), both of size _Copy_extent; returns a
/// completion_future.
template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent)
{
    static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type.");

    // NOTE(review): elided by doc extraction — the trace/span helper call
    // that this dangling argument line belongs to, and the return statement
    // that wraps _Ev into a completion_future at the end.
    _Get_section_size(_Src, _Copy_extent));

    // Start the texture-to-texture section copy without waiting.
    _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent);

}
4743 
4744 namespace details
4745 {
/// Adopts an existing D3D texture into an AMP _Texture on the given
/// accelerator_view. On success *_Tex receives the adopted _Texture pointer
/// and the texture's extent is returned.
/// _Av: accelerator_view to adopt the texture on (must be D3D-backed).
/// _D3D_texture: the D3D texture interface to adopt (must be non-NULL).
/// _Id: base element type id for the adopted texture.
/// _View_format: DXGI format used to view the texture data.
template<int _Rank>
Concurrency::extent<_Rank> _Make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, _Texture_base_type_id _Id, _Inout_ _Texture ** _Tex, DXGI_FORMAT _View_format) __CPU_ONLY
{
    if (_D3D_texture == NULL)
    {
        throw runtime_exception("NULL D3D texture pointer.", E_INVALIDARG);
    }

    // NOTE(review): the guard condition for the throw below was elided by
    // doc extraction — it rejects accelerator_views that are not D3D-backed.
    // Restore it from the original header; this body does not compile as-is.
    throw runtime_exception("Cannot create D3D texture on a non-D3D accelerator_view.", E_INVALIDARG);
    }

    _Texture * _Tex_ptr = _Texture::_Adopt_texture(_Rank, _Id, _D3D_texture, _Av, _View_format);
    // Staging textures are mapped on the CPU for write access before use.
    if (_Tex_ptr->_Is_staging())
    {
        _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */);
    }
    Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Create_extent<_Rank>(_Tex_ptr->_Get_width(), _Tex_ptr->_Get_height(), _Tex_ptr->_Get_depth());

    _Is_valid_extent(_Ext);

    *_Tex = _Tex_ptr;
    return _Ext;
}
4771 
4772 #pragma warning( pop )
4773 } // namespace details
4774 
4775 namespace direct3d
4776 {
    /// Returns the IUnknown interface of the D3D texture underlying the
    /// given AMP texture.
    template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture<_Value_type, _Rank> &_Texture) __CPU_ONLY
    {
        // NOTE(review): the return statement was elided by doc extraction —
        // presumably it forwards _Texture to the D3D interop helper that
        // retrieves the underlying D3D texture interface. Restore before use.
    }
4796 
#pragma warning( push )
#pragma warning( disable : 4996 ) //writeonly_texture_view is deprecated
    /// Returns the IUnknown interface of the D3D texture underlying the
    /// given (deprecated) writeonly_texture_view.
    template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const writeonly_texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY
    {
        // NOTE(review): the return statement was elided by doc extraction —
        // presumably the same interop forwarding as the texture overload.
    }
#pragma warning( pop )
4819 
    /// Returns the IUnknown interface of the D3D texture underlying the
    /// given texture_view.
    template<typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY
    {
        // NOTE(review): the return statement was elided by doc extraction —
        // presumably the same interop forwarding as the texture overload.
    }
4839 
    /// Creates an AMP texture of the given value type and rank by adopting an
    /// existing D3D texture on the given accelerator_view.
    /// _Av: the accelerator_view (must be D3D-backed, see _Make_texture).
    /// _D3D_texture: the D3D texture to adopt.
    /// _View_format: DXGI format for viewing the data.
    template<typename _Value_type, int _Rank> texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture,
        DXGI_FORMAT _View_format /*= DXGI_FORMAT_UNKNOWN*/) __CPU_ONLY
    {
        _Texture * _Tex_ptr = NULL;
        // double-based short vectors are represented with a uint base type at
        // the texture level, hence the _Double_type -> _Uint_type substitution.
#pragma warning( suppress: 6326 ) // Potential comparison of a constant with another constant
        Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Make_texture<_Rank>(_Av, _D3D_texture,
            _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Uint_type : _Short_vector_type_traits<_Value_type>::_Format_base_type_id,
            &_Tex_ptr, _View_format);

        _ASSERTE(_Tex_ptr);
        // NOTE(review): the return statement constructing the texture from
        // _Ext and _Tex_ptr was elided by doc extraction; restore before use.
    }
4876 
    /// Returns an IUnknown interface for the D3D sampler state corresponding
    /// to the given AMP sampler on the given accelerator_view.
    inline _Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY
    {
        // NOTE(review): the return statement was elided by doc extraction —
        // presumably it forwards _Av and _Sampler's descriptor to the D3D
        // interop layer. Restore before use.
    }
4893 
4903  inline sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY
4904  {
4905  return sampler(_Sampler_descriptor(_Sampler::_Create(_D3D_interop::_Get_D3D_sampler_data_ptr(_D3D_sampler))));
4906  }
4907 
4930  inline uint4 msad4(uint _Reference, uint2 _Source, uint4 _Accum) __GPU_ONLY
4931  {
4932  uint4 _Tmp;
4933  __dp_d3d_msad4(reinterpret_cast<uint*>(&_Tmp), _Reference, _Source.x, _Source.y, _Accum.x, _Accum.y, _Accum.z, _Accum.w);
4934  return _Tmp;
4935  }
4936 } // namespace direct3d
4937 
4938 } //namespace graphics
4939 } //namespace Concurrency
4940 
4941 
4942 
void copy(const _Src_type &_Src, _Out_ void *_Dst, unsigned int _Dst_byte_size)
Copies the contents of the source texture into the destination host buffer.
Definition: amp_graphics.h:4104
#define _Out_
Definition: sal.h:351
texture & operator=(const texture &_Other)
Copy assignment operator. Deep copy
Definition: amp_graphics.h:2111
uint_2::value_type _Scalar_type
Definition: amp_graphics.h:60
_Check_return_opt_ _In_ long _Offset
Definition: io.h:334
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:1064
friend class accelerator
Definition: amprt.h:1520
texture_view< const _Value_type, _Rank > & operator=(const texture_view< const _Value_type, _Rank > &_Other) __GPU
Assignment operator. This read-only texture_view becomes a view of the same texture which _Other is a...
Definition: amp_graphics.h:3225
_Sampler_descriptor _M_sampler_descriptor
Definition: amp_graphics.h:3109
unsigned int uint
Definition: amp_short_vectors.h:498
const _Value_type value_type
Definition: amp_graphics.h:3127
Definition: amp_graphics.h:2884
unsigned int _Get_bits_per_element() const
Definition: amprt.h:2327
Represents a short vector of 2 unorm values.
Definition: amp_short_vectors.h:11343
_OutIt copy(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2072
sampler & operator=(const sampler &_Other) __GPU
Assignment operator.
Definition: amp_graphics.h:3006
value_type sample(const coordinates_type &_Coord, float _Level_of_detail=0.0f) const __GPU_ONLY
Sample the texture at the given coordinates and level of detail using the predefined sampling configu...
Definition: amp_graphics.h:3422
unsigned int _Get_section_size(const _Texture_base< _Value_type, _Rank > &_Tex, const extent< _Rank > &_Extent)
Definition: amp_graphics.h:3688
static const unsigned int _Default_bits_per_channel
Definition: amp_graphics.h:44
_CRTIMP _In_ int _Value
Definition: setjmp.h:190
_Short_vector_base_type_id
Definition: amprt.h:285
texture(int _E0, int _E1, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from two integer extents, bound to a specific accelerator_view and a...
Definition: amp_graphics.h:841
void set(const index< _Rank > &_Index, const value_type &_Value) const __GPU_ONLY
Set the element indexed by _Index with value _Value.
Definition: amp_graphics.h:2861
uint4 msad4(uint _Reference, uint2 _Source, uint4 _Accum) __GPU_ONLY
Compares a 4-byte reference value and an 8-byte source value and accumulates a vector of 4 sums...
Definition: amp_graphics.h:4930
void _Get_preferred_copy_chunk_extent(unsigned int _Rank, size_t _Width, size_t _Height, size_t _Depth, size_t _Bits_per_element, _Out_writes_(3) size_t *_Preferred_copy_chunk_extent)
Definition: amprt.h:2693
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_texture(_In_ _Texture *_Texture_ptr)
unchecked_array_iterator< _Iterator > make_unchecked_array_iterator(_Iterator _Ptr)
Definition: iterator:729
double_4::value_type _Scalar_type
Definition: amp_graphics.h:282
void _Initialize(const Concurrency::accelerator_view &_Av, const void *_Source, unsigned int _Src_byte_size) __CPU_ONLY
Definition: amp_graphics.h:2515
Definition: amprt.h:2445
texture(const texture_view< _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view)
Construct a texture from a texture_view on another accelerator_view. Deep copy
Definition: amp_graphics.h:1986
sampler(sampler &&_Other) __GPU
Move constructor.
Definition: amp_graphics.h:2987
A texture is a data aggregate on an accelerator_view in the extent domain. It is a collection of vari...
Definition: amp_graphics.h:613
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element
Definition: amp_graphics.h:1303
_Ret_ IUnknown * get_texture(const texture< _Value_type, _Rank > &_Texture) __CPU_ONLY
Get the D3D texture interface underlying a texture.
Definition: amp_graphics.h:4792
__declspec(property(get=get_associated_accelerator_view)) Concurrency Concurrency::accelerator_view get_associated_accelerator_view() const __CPU_ONLY
Returns the accelerator_view that is the preferred target where this texture can be copied...
Definition: amp_graphics.h:2379
unsigned int _Get_texture_format() const
Definition: amprt.h:2306
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
extent< _Rank > _Get_extent_at_level(const extent< _Rank > &_Base_extent, unsigned int _Level)
Definition: xxamp_inl.h:141
sampler(const sampler &_Other) __GPU
Copy constructor.
Definition: amp_graphics.h:2973
texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and specified bits per scalar element, bound to a specific accelerator and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1455
Represents a short vector of 3 double values.
Definition: amp_short_vectors.h:18932
~texture_view() __GPU
Destructor
Definition: amp_graphics.h:3252
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and specified bits per scalar element...
Definition: amp_graphics.h:1573
const value_type operator()(const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2237
Definition: amprt.h:289
Concurrency::extent< _Rank > _Create_extent(size_t _Width, size_t _Height, size_t _Depth)
Definition: amp_graphics.h:360
float value_type
Definition: amp_short_vectors.h:8219
const value_type operator[](const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2744
Represents a unorm number: a floating-point value clamped to the range [0.0f, 1.0f].
Definition: amp_short_vectors.h:37
_AMPIMP accelerator_view _Get_accelerator_view() const
texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, bound to a specific...
Definition: amp_graphics.h:1427
__declspec(property(get=get_accelerator_view)) Concurrency Concurrency::accelerator_view get_accelerator_view() const __CPU_ONLY
Returns the accelerator_view where this texture or texture view is located.
Definition: amp_graphics.h:460
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, bound to a specific accelerat...
Definition: amp_graphics.h:1354
texture(const texture_view< const _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view)
Construct a texture from a read-only texture_view on another accelerator_view. Deep copy ...
Definition: amp_graphics.h:2001
Represent a short vector of 4 unsigned int's.
Definition: amp_short_vectors.h:1721
_Ret_ _Texture * _Get_texture(const _Texture_type &_Tex) __CPU_ONLY
Definition: xxamp.h:1101
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1804
Represent a short vector of 2 unsigned int's.
Definition: amp_short_vectors.h:522
float_2::value_type _Scalar_type
Definition: amp_graphics.h:141
bool _Get_chunked_staging_texture(_In_ _Texture *_Tex, const size_t *_Copy_chunk_extent, _Inout_ size_t *_Remaining_copy_extent, _Out_ size_t *_Curr_copy_extent, _Out_ _Texture_ptr *_Staging_texture)
Definition: amprt.h:2519
_Texture_base(const _Texture_base &_Src, bool _Flatten_mipmap_levels) __GPU_ONLY
Definition: amp_graphics.h:546
address_mode
address modes supported for texture sampling
Definition: amp_graphics.h:2880
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and specified bits per scalar element, bound to a specific accelerator and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1511
unorm_2::value_type _Scalar_type
Definition: amp_graphics.h:181
A sampler class aggregates sampling configuration information to be used for texture sampling...
Definition: amp_graphics.h:2892
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, initialized from a host buffe...
Definition: amp_graphics.h:1710
__declspec(property(get=get_bits_per_scalar_element)) unsigned int bits_per_scalar_element
Returns the number of bits per scalar element
void _Copy_to(const _Texture_base &_Dest) const __CPU_ONLY
Definition: amp_graphics.h:558
_CRTIMP _In_opt_z_ const wchar_t _In_opt_z_ const wchar_t unsigned int
Definition: crtdefs.h:642
#define __GPU
Definition: amprt.h:41
void _Initialize(const Concurrency::accelerator_view &_Av, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2504
norm_4::value_type _Scalar_type
Definition: amp_graphics.h:241
size_t _Get_row_pitch() const
Definition: amprt.h:2387
sampler(address_mode _Address_mode, float_4 _Border_color=float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
Constructs a sampler with default filter mode (filter_linear, same for min, mag, mip), but specified addressing mode (same for all dimensions) and border color.
Definition: amp_graphics.h:2938
std::array< size_t, 3 > _Get_dimensions(const Concurrency::extent< _Rank > &_Ext, unsigned int _Mip_offset)
Definition: amp_graphics.h:298
texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, bound to a specific...
Definition: amp_graphics.h:1480
texture(const Concurrency::extent< _Rank > &_Ext, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:729
texture(const Concurrency::extent< _Rank > &_Ext, const _Texture_descriptor &_Descriptor)
Definition: amp_graphics.h:2386
_OutIt move(_InIt _First, _InIt _Last, _OutIt _Dest)
Definition: xutility:2447
uint_3::value_type _Scalar_type
Definition: amp_graphics.h:70
Class represents a virtual device abstraction on a C++ AMP data-parallel accelerator ...
Definition: amprt.h:1518
__declspec(property(get=get_row_pitch)) unsigned int row_pitch
Returns the row pitch (in bytes) of a 2D or 3D staging texture on the CPU to be used for navigating t...
const gather_return_type _Gather(const coordinates_type &_Coord, unsigned int _Component) const __GPU_ONLY
Definition: amp_graphics.h:3603
unsigned int get_mipmap_levels() const __GPU
Definition: amp_graphics.h:482
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:1121
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_buffer(_In_ _Buffer *_Buffer_ptr)
Represent a short vector of 3 unorm's.
Definition: amp_short_vectors.h:11718
double_2::value_type _Scalar_type
Definition: amp_graphics.h:262
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents and specified bits per scalar element, initialized from a ho...
Definition: amp_graphics.h:1743
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:42
Class represents a future corresponding to a C++ AMP asynchronous operation
Definition: amprt.h:1342
Represent a short vector of 2 int's.
Definition: amp_short_vectors.h:4189
Definition: amprt.h:291
static void _Is_valid_extent(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1203
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
void _Initialize(const Concurrency::accelerator_view &_Av) __CPU_ONLY
Definition: amp_graphics.h:2473
unsigned int _Get_most_detailed_mipmap_level() const __GPU
Definition: amp_graphics.h:581
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with integer _E0 and initialized from a pair of iterators into a con...
Definition: amp_graphics.h:1094
size_t _Get_depth(unsigned int _Mip_offset=0) const
Definition: amprt.h:2296
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture initialized from a pair of iterators into a container, bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying to/from this texture.
Definition: amp_graphics.h:1041
void _Are_valid_mipmap_parameters(unsigned int _Most_detailed_mipmap_level, unsigned int _Mipmap_levels=0)
Definition: xxamp.h:1255
unsigned int _Get_default_bits_per_scalar_element()
Definition: amp_graphics.h:290
short_vector< scalar_type, 4 >::type gather_return_type
Definition: amp_graphics.h:3129
_Value_type value_type
Definition: amp_graphics.h:412
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, initialized from a host buffer.
Definition: amp_graphics.h:1682
unsigned int value_type
Definition: amp_short_vectors.h:1724
static const bool is_texture
Definition: amp_graphics.h:3626
texture(int _E0) __CPU_ONLY
Construct texture with the extent _E0
Definition: amp_graphics.h:672
Definition: amprt.h:2228
#define NULL
Definition: crtdbg.h:30
texture(int _E0, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with the extent _E0, bound to a specific accelerator_view and an ass...
Definition: amp_graphics.h:793
Definition: amprt.h:288
unsigned int _Get_num_channels() const
Definition: amprt.h:2316
double value_type
Definition: amp_short_vectors.h:19666
_In_ size_t _In_z_ const char * _Source
Definition: tchar.h:2379
_AMPIMP accelerator_view _Get_access_on_accelerator_view() const
const gather_return_type gather_alpha(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3504
short_vector provides metaprogramming definitions which are useful for programming short vectors gene...
Definition: amp_short_vectors.h:23839
Represent a short vector of 4 double's.
Definition: amp_short_vectors.h:19663
float_4 _M_border_color
Definition: amp_graphics.h:3112
iterator_traits< _InIt >::difference_type distance(_InIt _First, _InIt _Last)
Definition: xutility:755
_AMPIMP bool __cdecl _Is_D3D_accelerator_view(const accelerator_view &_Av)
unsigned int get_data_length() const __CPU_ONLY
Definition: amp_graphics.h:493
_AMPIMP void _Get()
Wait until the _Event completes and throw any exceptions that occur.
Represent a short vector of 3 int's.
Definition: amp_short_vectors.h:4597
short_vector< float, _Rank >::type coordinates_type
Definition: amp_graphics.h:3128
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace()
const _Sampler_descriptor & _Get_descriptor() const __GPU_ONLY
Definition: amp_graphics.h:3104
texture(int _E0, int _E1, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from two integer extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:814
__declspec(property(get=get_border_color)) Concurrency Concurrency::graphics::float_4 get_border_color() const __GPU
Returns the sampler's border value
Definition: amp_graphics.h:3063
bool _Is_adopted() const
Definition: amprt.h:2157
enum _Short_vector_base_type_id _Texture_base_type_id
Definition: amprt.h:296
texture_view(const texture< _Value_type, _Rank > &_Src) __GPU_ONLY
Construct a read-only texture_view of a texture _Src on an accelerator.
Definition: amp_graphics.h:3137
Represent a norm number. Each element is a floating point number in the range of [-1.0f, 1.0f].
Definition: amp_short_vectors.h:208
texture_view< const _Value_type, _Rank > & operator=(const texture_view< _Value_type, _Rank > &_Other) __CPU_ONLY
Assignment operator from a writable texture_view. This read-only texture_view becomes a view of the s...
Definition: amp_graphics.h:3242
void set(const index< _Rank > &_Index, const value_type &_Value) __GPU_ONLY
Set the element indexed by _Index with value _Value.
Definition: amp_graphics.h:2319
texture(int _E0, int _E1) __CPU_ONLY
Construct a texture from two integer extents.
Definition: amp_graphics.h:690
unsigned int _Get_most_detailed_mipmap_level() const __GPU
Definition: amprt.h:653
#define UINT_MAX
Definition: limits.h:41
extent() __GPU
Default constructor. The value at each dimension is initialized to zero.
Definition: amp.h:404
friend _Event _Copy_async_impl(const _Texture_base< _Value_type, _Rank > &_Src, const index< _Rank > &_Src_offset, const _Texture_base< _Value_type, _Rank > &_Dst, const index< _Rank > &_Dst_offset, const extent< _Rank > &_Copy_extent) __CPU_ONLY
Definition: amp_graphics.h:4022
int_4::value_type _Scalar_type
Definition: amp_graphics.h:120
const gather_return_type gather_alpha(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3582
texture(const texture_view< _Value_type, _Rank > &_Src)
Construct a texture from a texture_view. Deep copy
Definition: amp_graphics.h:1959
sampler(const _Sampler_descriptor &_Descriptor) __CPU_ONLY
Definition: amp_graphics.h:3073
void __dp_d3d_msad4(_Out_ unsigned int *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int) __GPU_ONLY
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_sampler(const Concurrency::accelerator_view &_Av, _In_ _Sampler *_Sampler_ptr)
static void _Is_valid_section(const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
Definition: xxamp.h:1107
texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from three integer extents, bound to a specific accelerator_view and...
Definition: amp_graphics.h:895
Definition: amprt.h:287
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev
void copy_to(texture &_Dest) const
Copy-to, deep copy
Definition: amp_graphics.h:2150
const void * data() const __CPU_ONLY
Returns a CPU pointer to the raw data of this texture.
Definition: amp_graphics.h:2338
~_Texture_base() __GPU
Definition: amp_graphics.h:572
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents and specified bits per scalar element, bound to a specific a...
Definition: amp_graphics.h:1406
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, initialized from a ...
Definition: amp_graphics.h:1770
Concurrency::graphics::filter_mode _M_filter_mode
Definition: amp_graphics.h:3110
_Ret_ T * _Get_ptr() const
Definition: amprt.h:242
unsigned int get_row_pitch() const __CPU_ONLY
Definition: amp_graphics.h:2348
static const bool is_writable
Definition: amp_graphics.h:3627
_AMPIMP void _Map_buffer(_Access_mode _Map_type, bool _Wait)
void _Initialize() __CPU_ONLY
Definition: amp_graphics.h:3089
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element
Definition: amp_graphics.h:1241
texture(const texture_view< const _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view, const Concurrency::accelerator_view &_Associated_av)
Construct a staging texture from a read-only texture_view on another accelerator_view. Deep copy
Definition: amp_graphics.h:2039
std::array< size_t, 3 > _Get_indices(const index< _Rank > &_Idx)
Definition: amp_graphics.h:329
static const bool _Is_valid_SVT_for_texture
Definition: amp_graphics.h:41
Definition: amp_graphics.h:384
texture(int _E0, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element, initialized from a h...
Definition: amp_graphics.h:1622
#define _ASSERTE(expr)
Definition: crtdbg.h:216
sampler() __CPU_ONLY
Constructs a sampler with default filter mode (filter_linear, same for min, mag, mip), addressing mode (address_clamp, same for all dimensions), and border color (float_4(0.0f, 0.0f, 0.0f, 0.0f)).
Definition: amp_graphics.h:2905
_Ret_ void * data() __CPU_ONLY
Returns a CPU pointer to the raw data of this texture.
Definition: amp_graphics.h:2330
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from extents, specified bits per scalar element and number of mipmap levels ...
Definition: amp_graphics.h:1379
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
Concurrency::extent< _Rank > get_mipmap_extent(unsigned int _Mipmap_level) const __CPU_ONLY
Returns the extent for specific mipmap level of this texture or texture view.
Definition: amp_graphics.h:431
unsigned int _Get_view_mipmap_levels() const __GPU
Definition: amprt.h:658
texture(int _E0, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with integer _E0 and specified bits per scalar element
Definition: amp_graphics.h:1281
#define _In_
Definition: sal.h:314
int_2::value_type _Scalar_type
Definition: amp_graphics.h:100
_Texture_base(const _Texture_base &_Src) __GPU
Definition: amp_graphics.h:532
const gather_return_type gather_red(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3449
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Definition: amp_graphics.h:2479
texture_view(const texture_view< const _Value_type, _Rank > &_Other) __GPU
Construct a read-only texture_view from another read-only texture_view. Both are views of the same te...
Definition: amp_graphics.h:3195
Definition: amp_graphics.h:2874
Represent a short vector of 4 unorm's.
Definition: amp_short_vectors.h:12488
const gather_return_type gather_blue(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3485
_Texture_base(const _Texture_base &_Src, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __CPU_ONLY
Definition: amp_graphics.h:538
void copy_to(const writeonly_texture_view< _Value_type, _Rank > &_Dest) const
Copy-to, deep copy
Definition: amp_graphics.h:2174
norm_2::value_type _Scalar_type
Definition: amp_graphics.h:221
struct Concurrency::details::_Sampler_descriptor _Sampler_descriptor
texture & operator=(texture< _Value_type, _Rank > &&_Other)
Move assignment operator
Definition: amp_graphics.h:2131
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, bound to a specific accelerator.
Definition: amp_graphics.h:1539
#define __CPU_ONLY
Definition: amprt.h:43
Definition: amp_graphics.h:615
IUnknown * _M_data_ptr
Definition: amprt.h:553
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and initialized from a pair of iterators into a co...
Definition: amp_graphics.h:1154
Represent a short vector of 2 double's.
Definition: amp_short_vectors.h:18590
_Short_vector_type_traits< _Value_type >::_Scalar_type scalar_type
Definition: amp_graphics.h:413
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt.h:835
sampler(filter_mode _Filter_mode) __CPU_ONLY
Constructs a sampler with specified filter mode (same for min, mag, mip), but with default addressing...
Definition: amp_graphics.h:2920
_Ret_ _Sampler * _Get_sampler_ptr() const __CPU_ONLY
Definition: amp_graphics.h:3084
_Ret_ void * _Get_host_ptr() const
Definition: amprt.h:2095
_AMPIMP accelerator __cdecl _Select_default_accelerator()
texture(const Concurrency::extent< _Rank > &_Ext, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture from extents and specified bits per scalar element, initialized from a host buffe...
Definition: amp_graphics.h:1598
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture initialized from a pair of iterators into a container.
Definition: amp_graphics.h:916
bool _Are_mipmap_levels_overlapping(const _Texture_descriptor *_Other) const __CPU_ONLY
Definition: amprt.h:694
texture(const Concurrency::extent< _Rank > &_Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture initialized from a pair of iterators into a container, bound to a specific accele...
Definition: amp_graphics.h:1012
void _Initialize(const Concurrency::accelerator_view &_Av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Definition: amp_graphics.h:2493
texture_view(const texture_view< const _Value_type, _Rank > &_Other, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
Construct a read-only texture_view from another read-only texture_view. Allows narrowing down the acc...
Definition: amp_graphics.h:3214
Represent a short vector of 3 float's.
Definition: amp_short_vectors.h:8216
extent< _Rank > _Get_extent_at_level_unsafe(const extent< _Rank > &_Base_extent, unsigned int _Level) __GPU
Definition: xxamp_inl.h:95
unsigned int get_depth_pitch() const __CPU_ONLY
Definition: amp_graphics.h:2364
Represent a short vector of 4 int's.
Definition: amp_short_vectors.h:5400
unsigned int _Get_data_length(unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels, const size_t *_Extents=nullptr) const
Definition: amprt.h:2332
Concurrency::extent< _Rank > _Make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, _Texture_base_type_id _Id, _Inout_ _Texture **_Tex, DXGI_FORMAT _View_format) __CPU_ONLY
Definition: amp_graphics.h:4747
Concurrency::extent< _Rank > get_mipmap_extent(unsigned int _Mipmap_level) const __GPU_ONLY
Returns the extent for specific mipmap level of this texture or texture view.
Definition: amp_graphics.h:451
const gather_return_type gather_blue(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3562
_Ret_ _Sampler * _Get_sampler_ptr() const __CPU_ONLY
Definition: amprt.h:775
const gather_return_type gather_red(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3524
A texture_view provides read-only access and sampling capability to a textu...
Definition: amp_graphics.h:3124
float value_type
Definition: amp_short_vectors.h:8950
unsigned int _Get_max_mipmap_levels(const extent< _Rank > &_Extent)
Definition: xxamp.h:1234
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
void * _M_data_ptr
Definition: amprt.h:715
texture(const texture &_Src)
Copy constructor. Deep copy
Definition: amp_graphics.h:2051
texture(int _E0, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with the extent _E0, bound to a specific accelerator_view.
Definition: amp_graphics.h:769
unsigned int _Get_rank() const
Definition: amprt.h:2301
size_t _Get_depth_pitch() const
Definition: amprt.h:2397
value_type sample(const sampler &_Sampler, const coordinates_type &_Coord, float _Level_of_detail=0.0f) const __GPU_ONLY
Sample the texture at the given coordinates and level of detail using the specified sampling configur...
Definition: amp_graphics.h:3391
texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with the extent _E0 and from a pair of iterators into a container.
Definition: amp_graphics.h:936
unsigned int value_type
Definition: amp_short_vectors.h:525
_AMPIMP ULONG _Launch_async_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
Definition: amp_graphics.h:2882
void _Initialize(const Concurrency::accelerator_view &_Av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2463
unorm_3::value_type _Scalar_type
Definition: amp_graphics.h:191
texture(const texture &_Src, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av)
Copy constructor. Deep copy
Definition: amp_graphics.h:2096
_Event _Copy_async_impl(const void *_Src, unsigned int _Src_byte_size, const _Texture_base< _Value_type, _Rank > &_Dst, const index< _Rank > &_Offset, const Concurrency::extent< _Rank > &_Copy_extent)
Definition: amp_graphics.h:3922
Definition: amp_graphics.h:3624
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view.
Definition: amp_graphics.h:1906
texture_view(const texture< _Value_type, _Rank > &_Src) __CPU_ONLY
Construct a texture_view of a texture _Src on the host.
Definition: amp_graphics.h:3150
texture_view(texture< _Value_type, _Rank > &_Src, unsigned int _Mipmap_level=0) __CPU_ONLY
Construct a texture_view of a texture _Src on host.
Definition: amp_graphics.h:2679
int_3::value_type _Scalar_type
Definition: amp_graphics.h:110
static const _Short_vector_base_type_id _Format_base_type_id
Definition: amp_graphics.h:42
texture(int _E0, int _E1, int _E2, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and specified bits per scalar element...
Definition: amp_graphics.h:1946
_AMPIMP ULONG _Start_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
size_t _Get_height(unsigned int _Mip_offset=0) const
Definition: amprt.h:2291
void _Set_view_mipmap_levels(unsigned int _View_mipmap_levels) __CPU_ONLY
Definition: amprt.h:663
__declspec(property(get=get_address_mode)) Concurrency Concurrency::graphics::address_mode get_address_mode() const __GPU
Returns the sampler's address mode
Definition: amp_graphics.h:3054
Concurrency::graphics::address_mode _M_address_mode
Definition: amp_graphics.h:3111
void _Set_texture_ptr(_In_opt_ _Texture *_Texture_ptr) __CPU_ONLY
Definition: amprt.h:668
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2498
texture(texture &&_Other)
Move constructor
Definition: amp_graphics.h:2063
Definition: amp_graphics.h:2883
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
Definition: amp_graphics.h:2873
const value_type operator[](const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2206
void _Initialize(const Concurrency::accelerator_view &_Av, const details::_Texture_base< _Value_type, _Rank > &_Src) __CPU_ONLY
Definition: amp_graphics.h:2549
texture(const texture &_Src, const Concurrency::accelerator_view &_Av)
Copy constructor. Deep copy
Definition: amp_graphics.h:2077
#define _T(x)
Definition: tchar.h:2498
Concurrency::details::_Texture_descriptor _Texture_descriptor
Definition: amp_graphics.h:500
double value_type
Definition: amp_short_vectors.h:18593
__declspec(property(get=get_filter_mode)) Concurrency Concurrency::graphics::filter_mode get_filter_mode() const __GPU
Returns the sampler's filter mode
Definition: amp_graphics.h:3045
void _Is_valid_data_length(unsigned int _Num_elems, unsigned int _Bits_per_elem)
Definition: amp_graphics.h:596
texture< _Value_type, _Rank > make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format=DXGI_FORMAT_UNKNOWN) __CPU_ONLY
Create a texture from a D3D texture interface pointer, optionally using the specified DXGI format fo...
Definition: amp_graphics.h:4864
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with three integers and initialized from a pair of iterators into a containe...
Definition: amp_graphics.h:1184
Represent a short vector of 2 float's.
Definition: amp_short_vectors.h:7874
sampler(filter_mode _Filter_mode, address_mode _Address_mode, float_4 _Border_color=float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY
Constructs a sampler with specified filter mode (same for min, mag, mip), addressing mode (same for a...
Definition: amp_graphics.h:2959
float_3::value_type _Scalar_type
Definition: amp_graphics.h:151
Represent a short vector of 3 norm's.
Definition: amp_short_vectors.h:15325
_Texture_descriptor _M_texture_descriptor
Definition: amp_graphics.h:593
A texture_view provides read and write access to a texture. Note that currently texture_view can only...
Definition: amp_graphics.h:617
double_3::value_type _Scalar_type
Definition: amp_graphics.h:272
Represent a short vector of 4 float's.
Definition: amp_short_vectors.h:8947
_Ret_ _Texture * _Get_texture() const __CPU_ONLY
Definition: amp_graphics.h:576
double value_type
Definition: amp_short_vectors.h:18935
const gather_return_type gather_green(const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the predefined sampling configuration and return th...
Definition: amp_graphics.h:3542
float_4::value_type _Scalar_type
Definition: amp_graphics.h:161
_Texture_base(const Concurrency::extent< _Rank > &_Ext, unsigned int _Mipmap_levels=1) __CPU_ONLY
Definition: amp_graphics.h:508
bool operator==(const _Texture_base &_Other) const __CPU_ONLY
Definition: amp_graphics.h:567
const gather_return_type _Gather(const sampler &_Sampler, const coordinates_type &_Coord, unsigned int _Component) const __GPU_ONLY
Definition: amp_graphics.h:3590
Define an N-dimensional index point; which may also be viewed as a vector based at the origin in N-sp...
Definition: amp.h:53
unsigned int value_type
Definition: amp_short_vectors.h:927
static const unsigned int _Num_channels
Definition: amp_graphics.h:43
texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with two integers and initialized from a pair of iterators into a container...
Definition: amp_graphics.h:960
texture_view(const texture_view< _Value_type, _Rank > &_Other) __CPU_ONLY
Construct a read-only texture_view of a writable texture_view.
Definition: amp_graphics.h:3184
#define _Inout_
Definition: sal.h:384
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY
Construct a texture with three integers and initialized from a pair of iterators into a containe...
Definition: amp_graphics.h:988
filter_mode
filter modes supported for texture sampling
Definition: amp_graphics.h:2870
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view.
Definition: amp_graphics.h:1835
bool _Are_mipmap_levels_overlapping(const _Texture_base &_Other) const __CPU_ONLY
Definition: amp_graphics.h:586
uint_4::value_type _Scalar_type
Definition: amp_graphics.h:80
texture_view(const texture_view< _Value_type, _Rank > &_Other) __GPU
Construct a texture_view from another texture_view. Both are views of the same texture.
Definition: amp_graphics.h:2707
_Ret_ _Texture * _Get_texture_ptr() const __CPU_ONLY
Definition: amprt.h:647
Definition: amprt.h:312
texture(const Concurrency::extent< _Rank > &_Ext) __CPU_ONLY
Construct a texture from extents.
Definition: amp_graphics.h:658
Concurrency::extent< _Rank > _M_extent
Definition: amp_graphics.h:592
Concurrency::details::_Sampler_descriptor _Sampler_descriptor
Definition: amp_graphics.h:3070
void _Copy_data_on_host(int _Rank, _Input_iterator _Src, _Out_ _Value_type *_Dst, size_t _Width, size_t _Height, size_t _Depth, size_t _Dst_row_pitch_in_bytes, size_t _Dst_depth_pitch_in_bytes, size_t _Src_row_pitch, size_t _Src_depth_pitch)
Definition: amprt.h:2564
Definition: amprt.h:292
_Texture_base(const Concurrency::extent< _Rank > &_Ext, const _Texture_descriptor &_Desc) __CPU_ONLY
Definition: amp_graphics.h:552
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Definition: amp_graphics.h:2468
texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view &_Av) __CPU_ONLY
Construct a texture from three integer extents, bound to a specific accelerator_view.
Definition: amp_graphics.h:865
basic_stringstream< char, char_traits< char >, allocator< char > > stringstream
Definition: iosfwd:687
texture(const Concurrency::extent< _Rank > &_Ext, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture from extents, bound to a specific accelerator_view and an associated acce...
Definition: amp_graphics.h:752
float value_type
Definition: amp_short_vectors.h:7877
int value_type
Definition: amp_short_vectors.h:5403
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with two integers and specified bits per scalar element, initialized from a ...
Definition: amp_graphics.h:1650
unsigned int get_bits_per_scalar_element() const __CPU_ONLY
Definition: amp_graphics.h:469
~texture_view() __GPU
Destructor
Definition: amp_graphics.h:2731
sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY
Create a sampler from a D3D sampler state interface pointer.
Definition: amp_graphics.h:4903
Definition: amprt.h:86
Definition: amp_graphics.h:2885
void _Is_valid_mipmap_range(unsigned int _Src_view_mipmap_levels, unsigned int _Dst_most_detailed_level, unsigned int _Dst_view_mipmap_levels)
Definition: xxamp.h:1275
concurrency::completion_future copy_async(const _Src_type &_Src, _Out_ void *_Dst, unsigned int _Dst_byte_size)
Asynchronously copies the contents of the source texture into the destination host buffer...
Definition: amp_graphics.h:4237
void _Set_sampler_ptr(_In_opt_ _Sampler *_Sampler_ptr) __CPU_ONLY
Definition: amprt.h:780
texture(int _E0, int _E1, int _E2) __CPU_ONLY
Construct a texture from three integer extents.
Definition: amp_graphics.h:711
const gather_return_type gather_green(const sampler &_Sampler, const coordinates_type &_Coord) const __GPU_ONLY
Sample the texture at the given coordinates using the specified sampling configuration and return the...
Definition: amp_graphics.h:3466
#define __GPU_ONLY
Definition: amprt.h:42
norm_3::value_type _Scalar_type
Definition: amp_graphics.h:231
size_t _Get_width(unsigned int _Mip_offset=0) const
Definition: amprt.h:2286
_Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY
Get the D3D sampler state interface on the given accelerator view that represents the specified sampl...
Definition: amp_graphics.h:4889
int value_type
Definition: amp_short_vectors.h:4600
bool _Is_staging() const
Definition: amprt.h:2127
const value_type operator()(const index< _Rank > &_Index) const __GPU_ONLY
Get the element value indexed by _Index.
Definition: amp_graphics.h:2779
Definition: amp_graphics.h:2886
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const details::_Texture_base< _Value_type, _Rank > &_Src) __CPU_ONLY
Definition: amp_graphics.h:2520
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Definition: amp_graphics.h:2396
bool _Is_cpu_accelerator(const accelerator &_Accl)
Definition: amprt.h:3469
~texture() __CPU_ONLY
Destructor
Definition: amp_graphics.h:2193
struct Concurrency::details::_Texture_descriptor _Texture_descriptor
texture_view(const texture< _Value_type, _Rank > &_Src, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY
Construct a read-only texture_view with specific range of mipmap levels of a texture _Src on the host...
Definition: amp_graphics.h:3170
texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with three integers and initialized from a pair of iterators into a ...
Definition: amp_graphics.h:1220
bool _Should_create_staging_texture(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av)
Definition: amp_graphics.h:2391
static const int rank
Definition: amp_graphics.h:411
texture(int _E0, int _E1, const void *_Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) __CPU_ONLY
Construct a staging texture with two integers and specified bits per scalar element...
Definition: amp_graphics.h:1872
sampler & operator=(sampler &&_Other) __GPU
Move assignment operator.
Definition: amp_graphics.h:3027
Definition: amp_graphics.h:2872
texture_view(texture< _Value_type, _Rank > &_Src) __GPU_ONLY
Construct a texture_view of a texture _Src on an accelerator.
Definition: amp_graphics.h:2693
unsigned int _Get_bits_per_channel() const
Definition: amprt.h:2321
Definition: set:42
_Texture_base() __CPU_ONLY
Definition: amp_graphics.h:502
Represent a short vector of 3 unsigned int's.
Definition: amp_short_vectors.h:924
Definition: amprt.h:290
_FwdIt _Last
Definition: algorithm:1936
unorm_4::value_type _Scalar_type
Definition: amp_graphics.h:201
Represent a short vector of 2 norm's.
Definition: amp_short_vectors.h:14929
void _Initialize(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av, const void *_Source, unsigned int _Src_byte_size) __CPU_ONLY
Definition: amp_graphics.h:2509
const _Texture_descriptor & _Get_texture_descriptor(const _Texture_type &_Tex) __GPU
Definition: xxamp.h:1095
texture(const texture_view< _Value_type, _Rank > &_Src, const Concurrency::accelerator_view &_Acc_view, const Concurrency::accelerator_view &_Associated_av)
Construct a staging texture from a texture_view on another accelerator_view. Deep copy ...
Definition: amp_graphics.h:2020
_AMPIMP void _Write_end_event(ULONG _Span_id)
#define _Ret_
Definition: sal.h:1005
int value_type
Definition: amp_short_vectors.h:4192
concurrency::completion_future _Start_async_op_wait_event_helper(ULONG _Async_op_id, _Event _Ev)
Definition: amprt.h:3912
texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element) __CPU_ONLY
Construct a texture with three integers and specified bits per scalar element ...
Definition: amp_graphics.h:1331
Represent a short vector of 4 norm's.
Definition: amp_short_vectors.h:16120
_Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char * _Ext
Definition: stdlib.h:854
__declspec(property(get=get_extent)) Concurrency Concurrency::extent< _Rank > get_extent() const __GPU
Returns the extent that defines the shape of this texture or texture view.
Definition: amp_graphics.h:420
texture(const Concurrency::extent< _Rank > &_Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels) __CPU_ONLY
Construct a texture from extents, specified bits per scalar element and number of mipmap levels ...
Definition: amp_graphics.h:1263
texture(const texture_view< const _Value_type, _Rank > &_Src)
Construct a texture from a read-only texture_view. Deep copy
Definition: amp_graphics.h:1971
class __declspec(deprecated("writeonly_texture_view is deprecated. Please use texture_view instead.")) writeonly_texture_view
A writeonly_texture_view provides writeonly access to a texture.
Definition: amp_graphics.h:2566
texture_view< _Value_type, _Rank > & operator=(const texture_view< _Value_type, _Rank > &_Other) __GPU
Assignment operator. This texture_view becomes a view of the same texture which _Other is a view of...
Definition: amp_graphics.h:2718