smmintrin.h
/* Copyright (C) 2007-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */

#ifndef _SMMINTRIN_H_INCLUDED
#define _SMMINTRIN_H_INCLUDED

#ifndef __SSE4_1__
# error "SSE4.1 instruction set not enabled"
#else

/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
   files.  */
#include <tmmintrin.h>

/* Rounding mode macros.  */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04

#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08

#define _MM_FROUND_NINT \
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR \
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL \
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC \
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
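
/* Illustrative note (not part of the original header): each composed macro
   above is just a rounding direction OR-ed with an exception-control bit,
   e.g.

     _MM_FROUND_FLOOR     == 0x01  (toward -inf, precision exceptions allowed)
     _MM_FROUND_NEARBYINT == 0x0c  (current MXCSR direction, exceptions
                                    suppressed)  */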

/* Test Instruction */
/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & __M) == 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testz_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & ~__M) == 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & __M) != 0 && (__V & ~__M) != 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testnzc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
}

/* Macros for packed integer 128-bit comparison intrinsics.  */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))
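
/* Usage sketch (illustrative, assuming an SSE4.1-enabled build):
   _mm_test_all_zeros (v, v) returns 1 iff every bit of v is zero, and
   _mm_test_all_ones (v) returns 1 iff every bit is one (its mask argument,
   _mm_cmpeq_epi32 (v, v), is all ones regardless of v):

     __m128i v = _mm_setzero_si128 ();
     int z = _mm_test_all_zeros (v, v);   z == 1
     int o = _mm_test_all_ones (v);       o == 0
*/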

/* Packed/scalar double precision floating point rounding.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
                                           (__v2df)__V,
                                           __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D), \
                                     (__v2df)(__m128d)(V), (int)(M)))
#endif
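
/* Usage sketch (illustrative): _mm_round_sd rounds only the low double of
   its second argument and passes the high double of the first through:

     __m128d d = _mm_set_pd (9.0, 9.0);
     __m128d v = _mm_set_pd (0.0, 2.7);
     __m128d r = _mm_round_sd (d, v, _MM_FROUND_TO_NEG_INF);
     r is {lo = 2.0, hi = 9.0}
*/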

/* Packed/scalar single precision floating point rounding.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
                                          (__v4sf)__V,
                                          __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D), \
                                    (__v4sf)(__m128)(V), (int)(M)))
#endif

/* Macros for ceil/floor intrinsics.  */
#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V) _mm_round_pd ((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)
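
/* Usage sketch (illustrative): the ceil/floor macros are fixed-mode uses of
   _mm_round_ps/_mm_round_pd; lanes below are listed low to high:

     __m128 v = _mm_setr_ps (-1.1f, 1.1f, -2.5f, 2.5f);
     __m128 c = _mm_ceil_ps (v);    {-1.0, 2.0, -2.0, 3.0}
     __m128 f = _mm_floor_ps (v);   {-2.0, 1.0, -3.0, 2.0}
*/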

/* SSE4.1 */

/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
                                              (__v8hi)__Y,
                                              __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \
                                        (__v8hi)(__m128i)(Y), (int)(M)))
#endif

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
                                               (__v16qi)__Y,
                                               (__v16qi)__M);
}
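
/* Usage sketch (illustrative): for _mm_blend_epi16, bit i of the immediate
   picks word i from __Y (bit set) or __X (bit clear); for _mm_blendv_epi8
   it is the top bit of each byte of __M that selects __Y:

     __m128i x = _mm_set1_epi16 (1);
     __m128i y = _mm_set1_epi16 (2);
     __m128i r = _mm_blend_epi16 (x, y, 0x0F);   words 0-3 = 2, words 4-7 = 1
*/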

/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
                                          (__v4sf)__Y,
                                          __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \
                                    (__v4sf)(__m128)(Y), (int)(M)))
#endif

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
                                           (__v4sf)__Y,
                                           (__v4sf)__M);
}

/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
                                           (__v2df)__Y,
                                           __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \
                                     (__v2df)(__m128d)(Y), (int)(M)))
#endif

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
                                            (__v2df)__Y,
                                            (__v2df)__M);
}

/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
                                       (__v4sf)__Y,
                                       __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
                                        (__v2df)__Y,
                                        __M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \
                                 (__v4sf)(__m128)(Y), (int)(M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \
                                  (__v2df)(__m128d)(Y), (int)(M)))
#endif
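
/* Usage sketch (illustrative): for _mm_dp_ps, the high nibble of the mask
   selects which element products enter the sum and the low nibble selects
   which result lanes receive it (the rest are zeroed):

     __m128 a = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);
     __m128 b = _mm_setr_ps (5.0f, 6.0f, 7.0f, 8.0f);
     __m128 d = _mm_dp_ps (a, b, 0xF1);
     lane 0 of d is 1*5 + 2*6 + 3*7 + 4*8 = 70.0f; lanes 1-3 are 0.0f
*/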

/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
}

/* Min/max packed integer instructions.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication with truncation of upper
   halves of results.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication of 2 pairs of operands
   with two 64-bit results.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}
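
/* Usage sketch (illustrative) of the difference between the two multiplies:

     __m128i a = _mm_set1_epi32 (0x10000);   every lane is 2^16
     __m128i t = _mm_mullo_epi32 (a, a);     2^32 truncated: all lanes 0
     __m128i w = _mm_mul_epi32 (a, a);       lanes 0 and 2 widened to the
                                             64-bit product 0x100000000
*/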

/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
                                              (__v4sf)__S,
                                              __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \
                                        (__v4sf)(__m128)(S), (int)(N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
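
/* Usage sketch (illustrative): copy element 2 of s into element 0 of d and
   zero element 3 of the result, where d and s are hypothetical __m128
   values; the mask 0x8 sets the zeroing bit for element 3 only:

     __m128 r = _mm_insert_ps (d, s, _MM_MK_INSERTPS_NDX (2, 0, 0x8));
*/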

/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int i; float f; } __tmp; \
     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), (int)(N)); \
     __tmp.i; \
   }))
#endif
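
/* Usage sketch (illustrative): the return value is the IEEE-754 bit
   pattern, not a numeric conversion:

     int bits = _mm_extract_ps (_mm_set_ps (0.0f, 0.0f, 0.0f, 1.0f), 0);
     bits == 0x3f800000 (the encoding of 1.0f), not 1
*/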

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
                 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))

/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
                                                 __S, __N);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
                                                __S, __N);
}

#ifdef __x86_64__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
                                                __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \
                                           (int)(S), (int)(N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \
                                          (int)(S), (int)(N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \
                                          (long long)(S), (int)(N)))
#endif
#endif

/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  ((int) (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N)))
#define _mm_extract_epi32(X, N) \
  ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N)))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N)))
#endif
#endif
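
/* Usage sketch (illustrative) of an insert/extract round trip:

     __m128i v = _mm_setzero_si128 ();
     v = _mm_insert_epi32 (v, 42, 3);     write 42 into dword 3
     int e = _mm_extract_epi32 (v, 3);    e == 42
*/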

/* Return horizontal packed word minimum and its index in bits [15:0]
   and bits [18:16] respectively.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_minpos_epu16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}
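
/* Usage sketch (illustrative):

     __m128i v = _mm_setr_epi16 (9, 3, 7, 3, 8, 6, 5, 4);
     __m128i r = _mm_minpos_epu16 (v);
     int min = _mm_extract_epi16 (r, 0);   min == 3
     int idx = _mm_extract_epi16 (r, 1);   idx == 1, the first occurrence

   The instruction zeroes bits [127:19], so word 1 of the result holds just
   the 3-bit index.  */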

/* Packed integer sign-extension.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
}

/* Packed integer zero-extension.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
}
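
/* Usage sketch (illustrative): the cvtepi/cvtepu pairs differ only in how
   the upper bits are filled, e.g. for the low 8 bytes of a vector of 0xff:

     __m128i b  = _mm_set1_epi8 (-1);
     __m128i sx = _mm_cvtepi8_epi16 (b);   sign-extended: words are 0xffff
     __m128i zx = _mm_cvtepu8_epi16 (b);   zero-extended: words are 0x00ff
*/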

/* Pack 8 double words from 2 operands into 8 words of result with
   unsigned saturation.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packus_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
}
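
/* Usage sketch (illustrative): each signed 32-bit input is clamped to the
   unsigned 16-bit range [0, 65535]:

     __m128i x = _mm_setr_epi32 (-5, 0, 300000, 65535);
     __m128i r = _mm_packus_epi32 (x, x);
     words 0-3 of r: 0, 0, 65535, 65535
*/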

/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
                                              (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \
                                        (__v16qi)(__m128i)(Y), (int)(M)))
#endif
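
/* Usage sketch (illustrative): with a mask of 0, the 4-byte block Y[0..3]
   is compared against the eight sliding windows X[0..3] ... X[7..10], and
   the eight 16-bit sums of absolute differences land in the result words:

     __m128i sad = _mm_mpsadbw_epu8 (x, y, 0);

   where x and y are hypothetical __m128i values.  Mask bits [1:0] pick the
   block offset in Y; bit [2] picks a window base of 0 or 4 in X.  */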

/* Load double quadword using non-temporal aligned hint.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_load_si128 (__m128i *__X)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
}

#ifdef __SSE4_2__

/* These macros specify the source data format.  */
#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
#define _SIDD_SBYTE_OPS 0x02
#define _SIDD_SWORD_OPS 0x03

/* These macros specify the comparison operation.  */
#define _SIDD_CMP_EQUAL_ANY 0x00
#define _SIDD_CMP_RANGES 0x04
#define _SIDD_CMP_EQUAL_EACH 0x08
#define _SIDD_CMP_EQUAL_ORDERED 0x0c

/* These macros specify the polarity.  */
#define _SIDD_POSITIVE_POLARITY 0x00
#define _SIDD_NEGATIVE_POLARITY 0x10
#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30

/* These macros specify the output selection in _mm_cmpXstri ().  */
#define _SIDD_LEAST_SIGNIFICANT 0x00
#define _SIDD_MOST_SIGNIFICANT 0x40

/* These macros specify the output selection in _mm_cmpXstrm ().  */
#define _SIDD_BIT_MASK 0x00
#define _SIDD_UNIT_MASK 0x40

/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
                                                (__v16qi)__Y,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
                                      (__v16qi)__Y,
                                      __M);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
                                                (__v16qi)__Y, __LY,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
                                      (__v16qi)__Y, __LY,
                                      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \
                                          (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistri(X, Y, M) \
  ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \
                                      (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \
                                          (int)(LX), (__v16qi)(__m128i)(Y), \
                                          (int)(LY), (int)(M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                      (__v16qi)(__m128i)(Y), (int)(LY), \
                                      (int)(M)))
#endif
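
/* Usage sketch (illustrative): find the first byte of a 16-byte chunk that
   matches any character of a NUL-terminated set, where buf is a
   hypothetical pointer from which 16 bytes may be read:

     __m128i set   = _mm_loadu_si128 ((__m128i const *) "aeiou\0\0\0\0\0\0\0\0\0\0");
     __m128i chunk = _mm_loadu_si128 ((__m128i const *) buf);
     int idx = _mm_cmpistri (set, chunk,
                             _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY
                             | _SIDD_LEAST_SIGNIFICANT);
     idx is the position of the first match in chunk, or 16 if none
*/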

/* Intrinsics for text/string processing and reading values of
   EFlags.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrc(X, Y, M) \
  ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistro(X, Y, M) \
  ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrs(X, Y, M) \
  ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrz(X, Y, M) \
  ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#endif

/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
}

#ifdef __POPCNT__
#include <popcntintrin.h>
#endif

/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif
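
/* Usage sketch (illustrative): accumulating CRC32C over a buffer one byte
   at a time; the 0xffffffff pre- and post-conditioning is the usual CRC32C
   convention, not something this header imposes:

     static unsigned int
     crc32c_bytes (const unsigned char *p, unsigned long n)
     {
       unsigned int c = 0xffffffffU;
       while (n--)
         c = _mm_crc32_u8 (c, *p++);
       return c ^ 0xffffffffU;
     }
*/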

#endif /* __SSE4_2__ */

#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H_INCLUDED */