Avx512F.VL Class

Definition

public: ref class Avx512F::VL abstract
public abstract class Avx512F.VL
type Avx512F.VL = class
Public MustInherit Class Avx512F.VL
Inheritance
Object
Avx512F.VL
Derived

Properties

IsSupported
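
A minimal guard sketch (not part of the original reference): every call into this class should be gated on IsSupported, with a scalar or Avx2 fallback otherwise. The later sketches on this page assume these usings and this guard.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.VL.IsSupported)
{
    // The 128-bit and 256-bit forms of the AVX-512F instructions are available.
}
else
{
    // Fall back to Avx2 or scalar code.
}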

Methods

Abs(Vector128<Int64>)

__m128i _mm_abs_epi64 (__m128i a)

VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst

Abs(Vector256<Int64>)

__m256i _mm256_abs_epi64 (__m256i a)

VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst
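
A minimal sketch of the 128-bit overload; as with the narrower Abs intrinsics, the result uses the unsigned element type:

Vector128<long> v = Vector128.Create(-5L, 7L);
Vector128<ulong> abs = Avx512F.VL.Abs(v);   // <5, 7>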

AlignRight32(Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight32(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight64(Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

AlignRight64(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
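
A sketch of AlignRight32, which shifts the concatenation left:right to the right by count 32-bit elements and keeps the low 128 bits:

Vector128<int> hi = Vector128.Create(4, 5, 6, 7);   // upper half of the concatenation
Vector128<int> lo = Vector128.Create(0, 1, 2, 3);   // lower half of the concatenation
Vector128<int> r = Avx512F.VL.AlignRight32(hi, lo, 1);   // <1, 2, 3, 4>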

CompareGreaterThan(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpgt_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)

CompareGreaterThan(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpgt_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)

CompareGreaterThan(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)

CompareGreaterThan(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)

CompareGreaterThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpge_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmpge_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpge_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpge_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmpge_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmpge_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpge_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpge_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareLessThan(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmplt_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmplt_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmplt_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmplt_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmplt_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmplt_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThan(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmplt_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmplt_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmple_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmple_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmple_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmple_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmple_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmple_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmple_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmple_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareNotEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpne_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmpne_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpne_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpne_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmpne_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmpne_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)

CompareNotEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpne_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpne_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
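
Although the VPCMP[U]D/Q instructions write an opmask register, the managed Compare* methods return an ordinary vector mask with all-ones elements where the predicate holds. A sketch:

Vector128<uint> a = Vector128.Create(1u, 5u, 3u, 9u);
Vector128<uint> b = Vector128.Create(2u, 2u, 3u, 3u);
Vector128<uint> gt = Avx512F.VL.CompareGreaterThan(a, b);
// <0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF>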

ConvertToVector128Byte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi8 (__m128i a)

VPMOVUSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi8 (__m128i a)

VPMOVUSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi8 (__m256i a)

VPMOVUSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi8 (__m256i a)

VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
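
A sketch of the saturating narrow, which clamps each UInt32 to Byte.MaxValue and zeroes the unused upper elements of the result:

Vector128<uint> v = Vector128.Create(10u, 300u, 0u, 70000u);
Vector128<byte> b = Avx512F.VL.ConvertToVector128ByteWithSaturation(v);
// <10, 255, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>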

ConvertToVector128Double(Vector128<UInt32>)

__m128d _mm_cvtepu32_pd (__m128i a)

VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128Int16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi16 (__m128i a)

VPMOVSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi16 (__m128i a)

VPMOVSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi16 (__m256i a)

VPMOVSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi16 (__m256i a)

VPMOVSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi32 (__m128i a)

VPMOVSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi32 (__m256i a)

VPMOVSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128SByte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi8 (__m128i a)

VPMOVSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi8 (__m128i a)

VPMOVSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi8 (__m256i a)

VPMOVSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi8 (__m256i a)

VPMOVSQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Single(Vector128<UInt32>)

__m128 _mm_cvtepu32_ps (__m128i a)

VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi16 (__m128i a)

VPMOVUSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi16 (__m128i a)

VPMOVUSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi16 (__m256i a)

VPMOVUSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi16 (__m256i a)

VPMOVUSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector128<Double>)

__m128i _mm_cvtpd_epu32 (__m128d a)

VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector128<Single>)

__m128i _mm_cvtps_epu32 (__m128 a)

VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector256<Double>)

__m128i _mm256_cvtpd_epu32 (__m256d a)

VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128UInt32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi32 (__m128i a)

VPMOVUSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi32 (__m256i a)

VPMOVUSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epu32 (__m128d a)

VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epu32 (__m128 a)

VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32WithTruncation(Vector256<Double>)

__m128i _mm256_cvttpd_epu32 (__m256d a)

VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Double(Vector128<UInt32>)

__m256d _mm256_cvtepu32_pd (__m128i a)

VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256Single(Vector256<UInt32>)

__m256 _mm256_cvtepu32_ps (__m256i a)

VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32(Vector256<Single>)

__m256i _mm256_cvtps_epu32 (__m256 a)

VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32WithTruncation(Vector256<Single>)

__m256i _mm256_cvttps_epu32 (__m256 a)

VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
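
The two Single-to-UInt32 conversions differ only in rounding: the plain conversion uses the current rounding mode (round-to-nearest-even by default), while the WithTruncation form always rounds toward zero. A sketch:

Vector128<float> f = Vector128.Create(1.5f, 2.5f, 3.99f, 0.0f);
Vector128<uint> rounded = Avx512F.VL.ConvertToVector128UInt32(f);                 // <2, 2, 4, 0>
Vector128<uint> truncated = Avx512F.VL.ConvertToVector128UInt32WithTruncation(f); // <1, 2, 3, 0>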

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)
Fixup(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte)

__m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm)

VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

Fixup(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte)

__m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm)

VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

Fixup(Vector256<Double>, Vector256<Double>, Vector256<Int64>, Byte)

__m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm)

VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Fixup(Vector256<Single>, Vector256<Single>, Vector256<Int32>, Byte)

__m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm)

VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

GetExponent(Vector128<Double>)

__m128d _mm_getexp_pd (__m128d a)

VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst

GetExponent(Vector128<Single>)

__m128 _mm_getexp_ps (__m128 a)

VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst

GetExponent(Vector256<Double>)

__m256d _mm256_getexp_pd (__m256d a)

VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst

GetExponent(Vector256<Single>)

__m256 _mm256_getexp_ps (__m256 a)

VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst

GetHashCode()

Serves as the default hash function.

(Inherited from Object)
GetMantissa(Vector128<Double>, Byte)

__m128d _mm_getmant_pd (__m128d a)

VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst

GetMantissa(Vector128<Single>, Byte)

__m128 _mm_getmant_ps (__m128 a)

VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst

GetMantissa(Vector256<Double>, Byte)

__m256d _mm256_getmant_pd (__m256d a)

VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst

GetMantissa(Vector256<Single>, Byte)

__m256 _mm256_getmant_ps (__m256 a)

VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst
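
GetExponent and GetMantissa decompose x as mantissa * 2^exponent. A sketch with the mantissa normalized to [1, 2) (control byte 0):

Vector128<double> v = Vector128.Create(24.0, 0.375);
Vector128<double> e = Avx512F.VL.GetExponent(v);      // <4, -2>
Vector128<double> m = Avx512F.VL.GetMantissa(v, 0);   // <1.5, 1.5>, since 24 = 1.5 * 2^4 and 0.375 = 1.5 * 2^-2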

GetType()

Gets the Type of the current instance.

(Inherited from Object)
Max(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_max_epi64 (__m128i a, __m128i b)

VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_max_epu64 (__m128i a, __m128i b)

VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_max_epi64 (__m256i a, __m256i b)

VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Max(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_max_epu64 (__m256i a, __m256i b)

VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)
Min(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_min_epi64 (__m128i a, __m128i b)

VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_min_epu64 (__m128i a, __m128i b)

VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_min_epi64 (__m256i a, __m256i b)

VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Min(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_min_epu64 (__m256i a, __m256i b)

VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
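
AVX-512VL adds native 64-bit integer min/max, which earlier ISAs only offered for elements up to 32 bits. A sketch:

Vector256<long> a = Vector256.Create(-1L, 10L, 3L, -7L);
Vector256<long> b = Vector256.Create(2L, -10L, 3L, 7L);
Vector256<long> max = Avx512F.VL.Max(a, b);   // <2, 10, 3, 7>
Vector256<long> min = Avx512F.VL.Min(a, b);   // <-1, -10, 3, -7>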

PermuteVar2x64x2(Vector128<Double>, Vector128<Int64>, Vector128<Double>)

__m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128d b)

VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar4x32x2(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<Single>, Vector128<Int32>, Vector128<Single>)

__m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128 b)

VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x64(Vector256<Double>, Vector256<Int64>)

__m256d _mm256_permute4x64_pd (__m256d a, __m256i b)

VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Double>, Vector256<Int64>, Vector256<Double>)

__m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256d b)

VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar8x32x2(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<Single>, Vector256<Int32>, Vector256<Single>)

__m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256 b)

VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
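
The PermuteVar*x2 methods index into the concatenation of two source vectors; for the 4x32 form, bits 2:0 of each index select one of the eight combined elements. A sketch:

Vector128<int> lower = Vector128.Create(0, 1, 2, 3);
Vector128<int> upper = Vector128.Create(4, 5, 6, 7);
Vector128<int> idx = Vector128.Create(7, 0, 5, 2);
Vector128<int> r = Avx512F.VL.PermuteVar4x32x2(lower, idx, upper);   // <7, 0, 5, 2>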

Reciprocal14(Vector128<Double>)

__m128d _mm_rcp14_pd (__m128d a)

VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

Reciprocal14(Vector128<Single>)

__m128 _mm_rcp14_ps (__m128 a)

VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

Reciprocal14(Vector256<Double>)

__m256d _mm256_rcp14_pd (__m256d a)

VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

Reciprocal14(Vector256<Single>)

__m256 _mm256_rcp14_ps (__m256 a)

VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ReciprocalSqrt14(Vector128<Double>)

__m128d _mm_rsqrt14_pd (__m128d a)

VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

ReciprocalSqrt14(Vector128<Single>)

__m128 _mm_rsqrt14_ps (__m128 a)

VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ReciprocalSqrt14(Vector256<Double>)

__m256d _mm256_rsqrt14_pd (__m256d a)

VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

ReciprocalSqrt14(Vector256<Single>)

__m256 _mm256_rsqrt14_ps (__m256 a)

VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
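
The 14-bit approximations return estimates with relative error at most 2^-14; a Newton-Raphson step can refine them when more accuracy is needed. A sketch:

Vector128<float> v = Vector128.Create(2.0f, 4.0f, 0.5f, 8.0f);
Vector128<float> rcp = Avx512F.VL.Reciprocal14(v);        // ~<0.5, 0.25, 2, 0.125>
Vector128<float> rsqrt = Avx512F.VL.ReciprocalSqrt14(v);  // ~<0.707, 0.5, 1.414, 0.354>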

RotateLeft(Vector128<Int32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<Int64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector128<UInt32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<UInt64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector256<Int32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<Int64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateLeft(Vector256<UInt32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<UInt64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateLeftVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateLeftVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
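
A sketch of the immediate and per-element rotate forms:

Vector128<uint> v = Vector128.Create(0x80000001u, 1u, 2u, 3u);
Vector128<uint> byOne = Avx512F.VL.RotateLeft(v, 1);   // <0x00000003, 2, 4, 6>
Vector128<uint> counts = Vector128.Create(0u, 1u, 4u, 8u);
Vector128<uint> varied = Avx512F.VL.RotateLeftVariable(v, counts);
// <0x80000001, 2, 0x20, 0x300>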

RotateRight(Vector128<Int32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<Int64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector128<UInt32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<UInt64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector256<Int32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<Int64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRight(Vector256<UInt32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<UInt64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRightVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateRightVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RoundScale(Vector128<Double>, Byte)

__m128d _mm_roundscale_pd (__m128d a, int imm)

VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RoundScale(Vector128<Single>, Byte)

__m128 _mm_roundscale_ps (__m128 a, int imm)

VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RoundScale(Vector256<Double>, Byte)

__m256d _mm256_roundscale_pd (__m256d a, int imm)

VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RoundScale(Vector256<Single>, Byte)

__m256 _mm256_roundscale_ps (__m256 a, int imm)

VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
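
RoundScale rounds each element to a multiple of 2^-M, where M is held in bits 7:4 of the control byte and bits 1:0 select the rounding mode (0 = nearest-even). A sketch:

Vector128<double> v = Vector128.Create(2.625, -1.3);
Vector128<double> ints = Avx512F.VL.RoundScale(v, 0);      // <3, -1>  (round to integer)
Vector128<double> halves = Avx512F.VL.RoundScale(v, 0x10); // <2.5, -1.5>  (round to multiples of 0.5)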

Scale(Vector128<Double>, Vector128<Double>)

__m128d _mm_scalef_pd (__m128d a, __m128d b)

VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Scale(Vector128<Single>, Vector128<Single>)

__m128 _mm_scalef_ps (__m128 a, __m128 b)

VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

Scale(Vector256<Double>, Vector256<Double>)

__m256d _mm256_scalef_pd (__m256d a, __m256d b)

VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Scale(Vector256<Single>, Vector256<Single>)

__m256 _mm256_scalef_ps (__m256 a, __m256 b)

VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
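
Scale computes a * 2^floor(b) per element, the vector analogue of scalbn/ldexp. A sketch:

Vector128<double> a = Vector128.Create(3.0, 5.0);
Vector128<double> b = Vector128.Create(4.0, -1.0);
Vector128<double> scaled = Avx512F.VL.Scale(a, b);   // <48, 2.5>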

ShiftRightArithmetic(Vector128<Int64>, Byte)

__m128i _mm_srai_epi64 (__m128i a, int imm8)

VPSRAQ xmm1 {k1}{z}, xmm2, imm8

ShiftRightArithmetic(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_sra_epi64 (__m128i a, __m128i count)

VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightArithmetic(Vector256<Int64>, Byte)

__m256i _mm256_srai_epi64 (__m256i a, int imm8)

VPSRAQ ymm1 {k1}{z}, ymm2, imm8

ShiftRightArithmetic(Vector256<Int64>, Vector128<Int64>)

__m256i _mm256_sra_epi64 (__m256i a, __m128i count)

VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128

ShiftRightArithmeticVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_srav_epi64 (__m128i a, __m128i count)

VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

ShiftRightArithmeticVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_srav_epi64 (__m256i a, __m256i count)

VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
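
AVX-512 is the first x86 extension with a 64-bit arithmetic (sign-extending) right shift; SSE2/AVX2 only provided logical shifts for 64-bit lanes. A sketch:

Vector128<long> v = Vector128.Create(-8L, 8L);
Vector128<long> r = Avx512F.VL.ShiftRightArithmetic(v, 2);   // <-2, 2>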

Shuffle2x128(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8)

VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8)

VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
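
Shuffle2x128 assembles the result from whole 128-bit lanes: control bit 0 selects the lane taken from the first source, bit 1 the lane taken from the second. A sketch:

Vector256<int> a = Vector256.Create(0, 1, 2, 3, 4, 5, 6, 7);
Vector256<int> b = Vector256.Create(8, 9, 10, 11, 12, 13, 14, 15);
Vector256<int> r = Avx512F.VL.Shuffle2x128(a, b, 0b01);
// <4, 5, 6, 7, 8, 9, 10, 11>  (upper lane of a, then lower lane of b)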

TernaryLogic(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Double>, Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Single>, Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Double>, Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

TernaryLogic(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Single>, Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
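
The control byte is an 8-entry truth table over the three inputs; for example, 0xCA encodes (a & b) | (~a & c), a bitwise select. A sketch:

Vector128<uint> mask = Vector128.Create(0xFFFFFFFFu, 0u, 0xFFFFFFFFu, 0u);
Vector128<uint> x = Vector128.Create(1u, 1u, 1u, 1u);
Vector128<uint> y = Vector128.Create(2u, 2u, 2u, 2u);
Vector128<uint> sel = Avx512F.VL.TernaryLogic(mask, x, y, 0xCA);   // <1, 2, 1, 2>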

ToString()

Returns a string that represents the current object.

(Inherited from Object)

Applies to