|
| _CRTIMP void __cdecl | _wassert (_In_z_ const wchar_t *_Message, _In_z_ const wchar_t *_File, _In_ unsigned _Line) |
| |
| const __m128i | get_mask128 () |
| |
| M128 | operator& (const M128 &a, const M128 &b) |
| |
| M128 | operator| (const M128 &a, const M128 &b) |
| |
| M128 | operator^ (const M128 &a, const M128 &b) |
| |
| M128 | andnot (const M128 &a, const M128 &b) |
| |
| I64vec2 | unpack_low (const I64vec2 &a, const I64vec2 &b) |
| |
| I64vec2 | unpack_high (const I64vec2 &a, const I64vec2 &b) |
| |
| I32vec4 | cmpeq (const I32vec4 &a, const I32vec4 &b) |
| |
| I32vec4 | cmpneq (const I32vec4 &a, const I32vec4 &b) |
| |
| I32vec4 | unpack_low (const I32vec4 &a, const I32vec4 &b) |
| |
| I32vec4 | unpack_high (const I32vec4 &a, const I32vec4 &b) |
| |
| Is32vec4 | cmpeq (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is32vec4 | cmpneq (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is32vec4 | cmpgt (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is32vec4 | cmplt (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is32vec4 | unpack_low (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is32vec4 | unpack_high (const Is32vec4 &a, const Is32vec4 &b) |
| |
| I64vec2 | operator* (const Iu32vec4 &a, const Iu32vec4 &b) |
| |
| Iu32vec4 | cmpeq (const Iu32vec4 &a, const Iu32vec4 &b) |
| |
| Iu32vec4 | cmpneq (const Iu32vec4 &a, const Iu32vec4 &b) |
| |
| Iu32vec4 | unpack_low (const Iu32vec4 &a, const Iu32vec4 &b) |
| |
| Iu32vec4 | unpack_high (const Iu32vec4 &a, const Iu32vec4 &b) |
| |
| I16vec8 | operator* (const I16vec8 &a, const I16vec8 &b) |
| |
| I16vec8 | cmpeq (const I16vec8 &a, const I16vec8 &b) |
| |
| I16vec8 | cmpneq (const I16vec8 &a, const I16vec8 &b) |
| |
| I16vec8 | unpack_low (const I16vec8 &a, const I16vec8 &b) |
| |
| I16vec8 | unpack_high (const I16vec8 &a, const I16vec8 &b) |
| |
| Is16vec8 | operator* (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | cmpeq (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | cmpneq (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | cmpgt (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | cmplt (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | unpack_low (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | unpack_high (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | mul_high (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is32vec4 | mul_add (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | sat_add (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | sat_sub (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | simd_max (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Is16vec8 | simd_min (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Iu16vec8 | operator* (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | cmpeq (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | cmpneq (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | unpack_low (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | unpack_high (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | sat_add (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | sat_sub (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| Iu16vec8 | simd_avg (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| I16vec8 | mul_high (const Iu16vec8 &a, const Iu16vec8 &b) |
| |
| I8vec16 | cmpeq (const I8vec16 &a, const I8vec16 &b) |
| |
| I8vec16 | cmpneq (const I8vec16 &a, const I8vec16 &b) |
| |
| I8vec16 | unpack_low (const I8vec16 &a, const I8vec16 &b) |
| |
| I8vec16 | unpack_high (const I8vec16 &a, const I8vec16 &b) |
| |
| Is8vec16 | cmpeq (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | cmpneq (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | cmpgt (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | cmplt (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | unpack_low (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | unpack_high (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | sat_add (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Is8vec16 | sat_sub (const Is8vec16 &a, const Is8vec16 &b) |
| |
| Iu8vec16 | cmpeq (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | cmpneq (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | unpack_low (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | unpack_high (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | sat_add (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | sat_sub (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| I64vec2 | sum_abs (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | simd_avg (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | simd_max (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Iu8vec16 | simd_min (const Iu8vec16 &a, const Iu8vec16 &b) |
| |
| Is16vec8 | pack_sat (const Is32vec4 &a, const Is32vec4 &b) |
| |
| Is8vec16 | pack_sat (const Is16vec8 &a, const Is16vec8 &b) |
| |
| Iu8vec16 | packu_sat (const Is16vec8 &a, const Is16vec8 &b) |
| |
| F64vec2 | unpack_low (const F64vec2 &a, const F64vec2 &b) |
| |
| F64vec2 | unpack_high (const F64vec2 &a, const F64vec2 &b) |
| |
| int | move_mask (const F64vec2 &a) |
| |
| void | loadu (F64vec2 &a, double *p) |
| |
| void | storeu (double *p, const F64vec2 &a) |
| |
| void | store_nta (double *p, F64vec2 &a) |
| |
| | F64vec2_SELECT (eq) F64vec2_SELECT(lt) F64vec2_SELECT(le) F64vec2_SELECT(gt) F64vec2_SELECT(ge) F64vec2_SELECT(neq) F64vec2_SELECT(nlt) F64vec2_SELECT(nle) |
| int | F64vec2ToInt (const F64vec2 &a) |
| |
| F64vec2 | F32vec4ToF64vec2 (const F32vec4 &a) |
| |
| F32vec4 | F64vec2ToF32vec4 (const F64vec2 &a) |
| |
| F64vec2 | IntToF64vec2 (const F64vec2 &a, int b) |
| |
| F32vec8 | unpack_low (const F32vec8 &a, const F32vec8 &b) |
| |
| F32vec8 | unpack_high (const F32vec8 &a, const F32vec8 &b) |
| |
| int | move_mask (const F32vec8 &a) |
| |
| void | loadu (F32vec8 &a, const float *p) |
| |
| void | storeu (float *p, const F32vec8 &a) |
| |
| void | store_nta (float *p, const F32vec8 &a) |
| |
| void | maskload (F32vec8 &a, const float *p, const F32vec8 &m) |
| |
| void | maskload (F32vec4 &a, const float *p, const F32vec4 &m) |
| |
| void | maskstore (float *p, const F32vec8 &a, const F32vec8 &m) |
| |
| void | maskstore (float *p, const F32vec4 &a, const F32vec4 &m) |
| |
| F32vec8 | select_eq (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_lt (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_le (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_gt (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_ge (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_neq (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_nlt (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_nle (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_ngt (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F32vec8 | select_nge (const F32vec8 &a, const F32vec8 &b, const F32vec8 &c, const F32vec8 &d) |
| |
| F64vec4 | unpack_low (const F64vec4 &a, const F64vec4 &b) |
| |
| F64vec4 | unpack_high (const F64vec4 &a, const F64vec4 &b) |
| |
| int | move_mask (const F64vec4 &a) |
| |
| void | loadu (F64vec4 &a, double *p) |
| |
| void | storeu (double *p, const F64vec4 &a) |
| |
| void | store_nta (double *p, const F64vec4 &a) |
| |
| void | maskload (F64vec4 &a, const double *p, const F64vec4 &m) |
| |
| void | maskload (F64vec2 &a, const double *p, const F64vec2 &m) |
| |
| void | maskstore (double *p, const F64vec4 &a, const F64vec4 &m) |
| |
| void | maskstore (double *p, const F64vec2 &a, const F64vec2 &m) |
| |
| F64vec4 | select_eq (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_lt (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_le (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_gt (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_ge (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_neq (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_nlt (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_nle (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_ngt (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | select_nge (const F64vec4 &a, const F64vec4 &b, const F64vec4 &c, const F64vec4 &d) |
| |
| F64vec4 | F32vec4ToF64vec4 (const F32vec4 &a) |
| |
| F32vec4 | F64vec4ToF32vec8 (const F64vec4 &a) |
| |