Avx2 Class
Definition
Important
Some information relates to prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
Important
This API is not CLS-compliant.
This class provides access to Intel AVX2 hardware instructions via intrinsics.
public ref class Avx2 abstract : System::Runtime::Intrinsics::X86::Avx
[System.CLSCompliant(false)]
public abstract class Avx2 : System.Runtime.Intrinsics.X86.Avx
[<System.CLSCompliant(false)>]
type Avx2 = class
inherit Avx
Public MustInherit Class Avx2
Inherits Avx
- Inheritance
- Derived
- Attributes
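The sketch below is a minimal usage example, not part of the reference itself: callers check IsSupported and fall back to scalar code when AVX2 is unavailable. The vector path combines Add (VPADDD) with LoadVector256 and Store inherited from Avx. The class and method names are illustrative, and the code assumes compilation with unsafe code enabled and equal-length arrays.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class Avx2AddExample
{
    // Element-wise addition of two int arrays, 8 lanes per iteration, with a scalar tail.
    public static unsafe void Add(int[] left, int[] right, int[] result)
    {
        int i = 0;
        if (Avx2.IsSupported)
        {
            fixed (int* pl = left, pr = right, pd = result)
            {
                for (; i <= left.Length - Vector256<int>.Count; i += Vector256<int>.Count)
                {
                    Vector256<int> a = Avx.LoadVector256(pl + i); // unaligned load, inherited from Avx
                    Vector256<int> b = Avx.LoadVector256(pr + i);
                    Avx.Store(pd + i, Avx2.Add(a, b));            // _mm256_add_epi32 / VPADDD
                }
            }
        }
        for (; i < left.Length; i++)
        {
            result[i] = left[i] + right[i]; // remainder, and fallback when AVX2 is unavailable
        }
    }
}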
Properties
IsSupported |
This class provides access to Intel AVX2 hardware instructions via intrinsics. |
Methods
Abs(Vector256<Int16>) |
__m256i _mm256_abs_epi16 (__m256i a) VPABSW ymm, ymm/m256 |
Abs(Vector256<Int32>) |
__m256i _mm256_abs_epi32 (__m256i a) VPABSD ymm, ymm/m256 |
Abs(Vector256<SByte>) |
__m256i _mm256_abs_epi8 (__m256i a) VPABSB ymm, ymm/m256 |
Add(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_add_epi8 (__m256i a, __m256i b) VPADDB ymm, ymm, ymm/m256 |
Add(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_add_epi16 (__m256i a, __m256i b) VPADDW ymm, ymm, ymm/m256 |
Add(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_add_epi32 (__m256i a, __m256i b) VPADDD ymm, ymm, ymm/m256 |
Add(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_add_epi64 (__m256i a, __m256i b) VPADDQ ymm, ymm, ymm/m256 |
Add(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_add_epi8 (__m256i a, __m256i b) VPADDB ymm, ymm, ymm/m256 |
Add(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_add_epi16 (__m256i a, __m256i b) VPADDW ymm, ymm, ymm/m256 |
Add(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_add_epi32 (__m256i a, __m256i b) VPADDD ymm, ymm, ymm/m256 |
Add(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_add_epi64 (__m256i a, __m256i b) VPADDQ ymm, ymm, ymm/m256 |
AddSaturate(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_adds_epu8 (__m256i a, __m256i b) VPADDUSB ymm, ymm, ymm/m256 |
AddSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_adds_epi16 (__m256i a, __m256i b) VPADDSW ymm, ymm, ymm/m256 |
AddSaturate(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_adds_epi8 (__m256i a, __m256i b) VPADDSB ymm, ymm, ymm/m256 |
AddSaturate(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_adds_epu16 (__m256i a, __m256i b) VPADDUSW ymm, ymm, ymm/m256 |
AlignRight(Vector256<Byte>, Vector256<Byte>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<Int16>, Vector256<Int16>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<Int32>, Vector256<Int32>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<Int64>, Vector256<Int64>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<SByte>, Vector256<SByte>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<UInt16>, Vector256<UInt16>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<UInt32>, Vector256<UInt32>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
AlignRight(Vector256<UInt64>, Vector256<UInt64>, Byte) |
__m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count) VPALIGNR ymm, ymm, ymm/m256, imm8 |
And(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
And(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_and_si256 (__m256i a, __m256i b) VPAND ymm, ymm, ymm/m256 |
AndNot(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
AndNot(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_andnot_si256 (__m256i a, __m256i b) VPANDN ymm, ymm, ymm/m256 |
Average(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_avg_epu8 (__m256i a, __m256i b) VPAVGB ymm, ymm, ymm/m256 |
Average(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_avg_epu16 (__m256i a, __m256i b) VPAVGW ymm, ymm, ymm/m256 |
Blend(Vector128<Int32>, Vector128<Int32>, Byte) |
__m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8) VPBLENDD xmm, xmm, xmm/m128, imm8 |
Blend(Vector128<UInt32>, Vector128<UInt32>, Byte) |
__m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8) VPBLENDD xmm, xmm, xmm/m128, imm8 |
Blend(Vector256<Int16>, Vector256<Int16>, Byte) |
__m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8) VPBLENDW ymm, ymm, ymm/m256, imm8 |
Blend(Vector256<Int32>, Vector256<Int32>, Byte) |
__m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8) VPBLENDD ymm, ymm, ymm/m256, imm8 |
Blend(Vector256<UInt16>, Vector256<UInt16>, Byte) |
__m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8) VPBLENDW ymm, ymm, ymm/m256, imm8 |
Blend(Vector256<UInt32>, Vector256<UInt32>, Byte) |
__m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8) VPBLENDD ymm, ymm, ymm/m256, imm8 |
BlendVariable(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BlendVariable(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask) VPBLENDVB ymm, ymm, ymm/m256, ymm |
BroadcastScalarToVector128(Byte*) |
__m128i _mm_broadcastb_epi8 (__m128i a) VPBROADCASTB xmm, m8 |
BroadcastScalarToVector128(Int16*) |
__m128i _mm_broadcastw_epi16 (__m128i a) VPBROADCASTW xmm, m16 |
BroadcastScalarToVector128(Int32*) |
__m128i _mm_broadcastd_epi32 (__m128i a) VPBROADCASTD xmm, m32 |
BroadcastScalarToVector128(Int64*) |
__m128i _mm_broadcastq_epi64 (__m128i a) VPBROADCASTQ xmm, m64 |
BroadcastScalarToVector128(SByte*) |
__m128i _mm_broadcastb_epi8 (__m128i a) VPBROADCASTB xmm, m8 |
BroadcastScalarToVector128(UInt16*) |
__m128i _mm_broadcastw_epi16 (__m128i a) VPBROADCASTW xmm, m16 |
BroadcastScalarToVector128(UInt32*) |
__m128i _mm_broadcastd_epi32 (__m128i a) VPBROADCASTD xmm, m32 |
BroadcastScalarToVector128(UInt64*) |
__m128i _mm_broadcastq_epi64 (__m128i a) VPBROADCASTQ xmm, m64 |
BroadcastScalarToVector128(Vector128<Byte>) |
__m128i _mm_broadcastb_epi8 (__m128i a) VPBROADCASTB xmm, xmm |
BroadcastScalarToVector128(Vector128<Double>) |
__m128d _mm_broadcastsd_pd (__m128d a) VMOVDDUP xmm, xmm |
BroadcastScalarToVector128(Vector128<Int16>) |
__m128i _mm_broadcastw_epi16 (__m128i a) VPBROADCASTW xmm, xmm |
BroadcastScalarToVector128(Vector128<Int32>) |
__m128i _mm_broadcastd_epi32 (__m128i a) VPBROADCASTD xmm, xmm |
BroadcastScalarToVector128(Vector128<Int64>) |
__m128i _mm_broadcastq_epi64 (__m128i a) VPBROADCASTQ xmm, xmm |
BroadcastScalarToVector128(Vector128<SByte>) |
__m128i _mm_broadcastb_epi8 (__m128i a) VPBROADCASTB xmm, xmm |
BroadcastScalarToVector128(Vector128<Single>) |
__m128 _mm_broadcastss_ps (__m128 a) VBROADCASTSS xmm, xmm |
BroadcastScalarToVector128(Vector128<UInt16>) |
__m128i _mm_broadcastw_epi16 (__m128i a) VPBROADCASTW xmm, xmm |
BroadcastScalarToVector128(Vector128<UInt32>) |
__m128i _mm_broadcastd_epi32 (__m128i a) VPBROADCASTD xmm, xmm |
BroadcastScalarToVector128(Vector128<UInt64>) |
__m128i _mm_broadcastq_epi64 (__m128i a) VPBROADCASTQ xmm, xmm |
BroadcastScalarToVector256(Byte*) |
__m256i _mm256_broadcastb_epi8 (__m128i a) VPBROADCASTB ymm, m8 |
BroadcastScalarToVector256(Int16*) |
__m256i _mm256_broadcastw_epi16 (__m128i a) VPBROADCASTW ymm, m16 |
BroadcastScalarToVector256(Int32*) |
__m256i _mm256_broadcastd_epi32 (__m128i a) VPBROADCASTD ymm, m32 |
BroadcastScalarToVector256(Int64*) |
__m256i _mm256_broadcastq_epi64 (__m128i a) VPBROADCASTQ ymm, m64 |
BroadcastScalarToVector256(SByte*) |
__m256i _mm256_broadcastb_epi8 (__m128i a) VPBROADCASTB ymm, m8 |
BroadcastScalarToVector256(UInt16*) |
__m256i _mm256_broadcastw_epi16 (__m128i a) VPBROADCASTW ymm, m16 |
BroadcastScalarToVector256(UInt32*) |
__m256i _mm256_broadcastd_epi32 (__m128i a) VPBROADCASTD ymm, m32 |
BroadcastScalarToVector256(UInt64*) |
__m256i _mm256_broadcastq_epi64 (__m128i a) VPBROADCASTQ ymm, m64 |
BroadcastScalarToVector256(Vector128<Byte>) |
__m256i _mm256_broadcastb_epi8 (__m128i a) VPBROADCASTB ymm, xmm |
BroadcastScalarToVector256(Vector128<Double>) |
__m256d _mm256_broadcastsd_pd (__m128d a) VBROADCASTSD ymm, xmm |
BroadcastScalarToVector256(Vector128<Int16>) |
__m256i _mm256_broadcastw_epi16 (__m128i a) VPBROADCASTW ymm, xmm |
BroadcastScalarToVector256(Vector128<Int32>) |
__m256i _mm256_broadcastd_epi32 (__m128i a) VPBROADCASTD ymm, xmm |
BroadcastScalarToVector256(Vector128<Int64>) |
__m256i _mm256_broadcastq_epi64 (__m128i a) VPBROADCASTQ ymm, xmm |
BroadcastScalarToVector256(Vector128<SByte>) |
__m256i _mm256_broadcastb_epi8 (__m128i a) VPBROADCASTB ymm, xmm |
BroadcastScalarToVector256(Vector128<Single>) |
__m256 _mm256_broadcastss_ps (__m128 a) VBROADCASTSS ymm, xmm |
BroadcastScalarToVector256(Vector128<UInt16>) |
__m256i _mm256_broadcastw_epi16 (__m128i a) VPBROADCASTW ymm, xmm |
BroadcastScalarToVector256(Vector128<UInt32>) |
__m256i _mm256_broadcastd_epi32 (__m128i a) VPBROADCASTD ymm, xmm |
BroadcastScalarToVector256(Vector128<UInt64>) |
__m256i _mm256_broadcastq_epi64 (__m128i a) VPBROADCASTQ ymm, xmm |
BroadcastVector128ToVector256(Byte*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(Int16*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(Int32*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(Int64*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(SByte*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(UInt16*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(UInt32*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
BroadcastVector128ToVector256(UInt64*) |
__m256i _mm256_broadcastsi128_si256 (__m128i a) VBROADCASTI128 ymm, m128 |
CompareEqual(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b) VPCMPEQB ymm, ymm, ymm/m256 |
CompareEqual(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b) VPCMPEQW ymm, ymm, ymm/m256 |
CompareEqual(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b) VPCMPEQD ymm, ymm, ymm/m256 |
CompareEqual(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b) VPCMPEQQ ymm, ymm, ymm/m256 |
CompareEqual(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b) VPCMPEQB ymm, ymm, ymm/m256 |
CompareEqual(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b) VPCMPEQW ymm, ymm, ymm/m256 |
CompareEqual(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b) VPCMPEQD ymm, ymm, ymm/m256 |
CompareEqual(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b) VPCMPEQQ ymm, ymm, ymm/m256 |
CompareGreaterThan(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b) VPCMPGTW ymm, ymm, ymm/m256 |
CompareGreaterThan(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b) VPCMPGTD ymm, ymm, ymm/m256 |
CompareGreaterThan(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b) VPCMPGTQ ymm, ymm, ymm/m256 |
CompareGreaterThan(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b) VPCMPGTB ymm, ymm, ymm/m256 |
ConvertToInt32(Vector256<Int32>) |
int _mm256_cvtsi256_si32 (__m256i a) MOVD reg/m32, xmm |
ConvertToUInt32(Vector256<UInt32>) |
int _mm256_cvtsi256_si32 (__m256i a) MOVD reg/m32, xmm |
ConvertToVector256Int16(Byte*) |
VPMOVZXBW ymm, m128 |
ConvertToVector256Int16(SByte*) |
VPMOVSXBW ymm, m128 |
ConvertToVector256Int16(Vector128<Byte>) |
__m256i _mm256_cvtepu8_epi16 (__m128i a) VPMOVZXBW ymm, xmm |
ConvertToVector256Int16(Vector128<SByte>) |
__m256i _mm256_cvtepi8_epi16 (__m128i a) VPMOVSXBW ymm, xmm/m128 |
ConvertToVector256Int32(Byte*) |
VPMOVZXBD ymm, m64 |
ConvertToVector256Int32(Int16*) |
VPMOVSXWD ymm, m128 |
ConvertToVector256Int32(SByte*) |
VPMOVSXBD ymm, m64 |
ConvertToVector256Int32(UInt16*) |
VPMOVZXWD ymm, m128 |
ConvertToVector256Int32(Vector128<Byte>) |
__m256i _mm256_cvtepu8_epi32 (__m128i a) VPMOVZXBD ymm, xmm |
ConvertToVector256Int32(Vector128<Int16>) |
__m256i _mm256_cvtepi16_epi32 (__m128i a) VPMOVSXWD ymm, xmm/m128 |
ConvertToVector256Int32(Vector128<SByte>) |
__m256i _mm256_cvtepi8_epi32 (__m128i a) VPMOVSXBD ymm, xmm/m128 |
ConvertToVector256Int32(Vector128<UInt16>) |
__m256i _mm256_cvtepu16_epi32 (__m128i a) VPMOVZXWD ymm, xmm |
ConvertToVector256Int64(Byte*) |
VPMOVZXBQ ymm, m32 |
ConvertToVector256Int64(Int16*) |
VPMOVSXWQ ymm, m64 |
ConvertToVector256Int64(Int32*) |
VPMOVSXDQ ymm, m128 |
ConvertToVector256Int64(SByte*) |
VPMOVSXBQ ymm, m32 |
ConvertToVector256Int64(UInt16*) |
VPMOVZXWQ ymm, m64 |
ConvertToVector256Int64(UInt32*) |
VPMOVZXDQ ymm, m128 |
ConvertToVector256Int64(Vector128<Byte>) |
__m256i _mm256_cvtepu8_epi64 (__m128i a) VPMOVZXBQ ymm, xmm |
ConvertToVector256Int64(Vector128<Int16>) |
__m256i _mm256_cvtepi16_epi64 (__m128i a) VPMOVSXWQ ymm, xmm/m128 |
ConvertToVector256Int64(Vector128<Int32>) |
__m256i _mm256_cvtepi32_epi64 (__m128i a) VPMOVSXDQ ymm, xmm/m128 |
ConvertToVector256Int64(Vector128<SByte>) |
__m256i _mm256_cvtepi8_epi64 (__m128i a) VPMOVSXBQ ymm, xmm/m128 |
ConvertToVector256Int64(Vector128<UInt16>) |
__m256i _mm256_cvtepu16_epi64 (__m128i a) VPMOVZXWQ ymm, xmm |
ConvertToVector256Int64(Vector128<UInt32>) |
__m256i _mm256_cvtepu32_epi64 (__m128i a) VPMOVZXDQ ymm, xmm |
Equals(Object) |
Determines whether the specified object is equal to the current object. (Inherited from Object) |
ExtractVector128(Vector256<Byte>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<Int16>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<Int32>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<Int64>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<SByte>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<UInt16>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<UInt32>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
ExtractVector128(Vector256<UInt64>, Byte) |
__m128i _mm256_extracti128_si256 (__m256i a, const int imm8) VEXTRACTI128 xmm, ymm, imm8 |
GatherMaskVector128(Vector128<Double>, Double*, Vector128<Int32>, Vector128<Double>, Byte) |
__m128d _mm_mask_i32gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale) VGATHERDPD xmm, vm32x, xmm |
GatherMaskVector128(Vector128<Double>, Double*, Vector128<Int64>, Vector128<Double>, Byte) |
__m128d _mm_mask_i64gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale) VGATHERQPD xmm, vm64x, xmm |
GatherMaskVector128(Vector128<Int32>, Int32*, Vector128<Int32>, Vector128<Int32>, Byte) |
__m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERDD xmm, vm32x, xmm |
GatherMaskVector128(Vector128<Int32>, Int32*, Vector128<Int64>, Vector128<Int32>, Byte) |
__m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERQD xmm, vm64x, xmm |
GatherMaskVector128(Vector128<Int32>, Int32*, Vector256<Int64>, Vector128<Int32>, Byte) |
__m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale) VPGATHERQD xmm, vm64y, xmm |
GatherMaskVector128(Vector128<Int64>, Int64*, Vector128<Int32>, Vector128<Int64>, Byte) |
__m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERDQ xmm, vm32x, xmm |
GatherMaskVector128(Vector128<Int64>, Int64*, Vector128<Int64>, Vector128<Int64>, Byte) |
__m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERQQ xmm, vm64x, xmm |
GatherMaskVector128(Vector128<Single>, Single*, Vector128<Int32>, Vector128<Single>, Byte) |
__m128 _mm_mask_i32gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale) VGATHERDPS xmm, vm32x, xmm |
GatherMaskVector128(Vector128<Single>, Single*, Vector128<Int64>, Vector128<Single>, Byte) |
__m128 _mm_mask_i64gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale) VGATHERQPS xmm, vm64x, xmm |
GatherMaskVector128(Vector128<Single>, Single*, Vector256<Int64>, Vector128<Single>, Byte) |
__m128 _mm256_mask_i64gather_ps (__m128 src, float const* base_addr, __m256i vindex, __m128 mask, const int scale) VGATHERQPS xmm, vm64y, xmm |
GatherMaskVector128(Vector128<UInt32>, UInt32*, Vector128<Int32>, Vector128<UInt32>, Byte) |
__m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERDD xmm, vm32x, xmm |
GatherMaskVector128(Vector128<UInt32>, UInt32*, Vector128<Int64>, Vector128<UInt32>, Byte) |
__m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERQD xmm, vm64x, xmm |
GatherMaskVector128(Vector128<UInt32>, UInt32*, Vector256<Int64>, Vector128<UInt32>, Byte) |
__m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale) VPGATHERQD xmm, vm64y, xmm |
GatherMaskVector128(Vector128<UInt64>, UInt64*, Vector128<Int32>, Vector128<UInt64>, Byte) |
__m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERDQ xmm, vm32x, xmm |
GatherMaskVector128(Vector128<UInt64>, UInt64*, Vector128<Int64>, Vector128<UInt64>, Byte) |
__m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale) VPGATHERQQ xmm, vm64x, xmm |
GatherMaskVector256(Vector256<Double>, Double*, Vector128<Int32>, Vector256<Double>, Byte) |
__m256d _mm256_mask_i32gather_pd (__m256d src, double const* base_addr, __m128i vindex, __m256d mask, const int scale) VGATHERDPD ymm, vm32y, ymm |
GatherMaskVector256(Vector256<Double>, Double*, Vector256<Int64>, Vector256<Double>, Byte) |
__m256d _mm256_mask_i64gather_pd (__m256d src, double const* base_addr, __m256i vindex, __m256d mask, const int scale) VGATHERQPD ymm, vm64y, ymm |
GatherMaskVector256(Vector256<Int32>, Int32*, Vector256<Int32>, Vector256<Int32>, Byte) |
__m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale) VPGATHERDD ymm, vm32y, ymm |
GatherMaskVector256(Vector256<Int64>, Int64*, Vector128<Int32>, Vector256<Int64>, Byte) |
__m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale) VPGATHERDQ ymm, vm32y, ymm |
GatherMaskVector256(Vector256<Int64>, Int64*, Vector256<Int64>, Vector256<Int64>, Byte) |
__m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale) VPGATHERQQ ymm, vm64y, ymm |
GatherMaskVector256(Vector256<Single>, Single*, Vector256<Int32>, Vector256<Single>, Byte) |
__m256 _mm256_mask_i32gather_ps (__m256 src, float const* base_addr, __m256i vindex, __m256 mask, const int scale) VGATHERDPS ymm, vm32y, ymm |
GatherMaskVector256(Vector256<UInt32>, UInt32*, Vector256<Int32>, Vector256<UInt32>, Byte) |
__m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale) VPGATHERDD ymm, vm32y, ymm |
GatherMaskVector256(Vector256<UInt64>, UInt64*, Vector128<Int32>, Vector256<UInt64>, Byte) |
__m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale) VPGATHERDQ ymm, vm32y, ymm |
GatherMaskVector256(Vector256<UInt64>, UInt64*, Vector256<Int64>, Vector256<UInt64>, Byte) |
__m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale) VPGATHERQQ ymm, vm64y, ymm |
GatherVector128(Double*, Vector128<Int32>, Byte) |
__m128d _mm_i32gather_pd (double const* base_addr, __m128i vindex, const int scale) VGATHERDPD xmm, vm32x, xmm |
GatherVector128(Double*, Vector128<Int64>, Byte) |
__m128d _mm_i64gather_pd (double const* base_addr, __m128i vindex, const int scale) VGATHERQPD xmm, vm64x, xmm |
GatherVector128(Int32*, Vector128<Int32>, Byte) |
__m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale) VPGATHERDD xmm, vm32x, xmm |
GatherVector128(Int32*, Vector128<Int64>, Byte) |
__m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale) VPGATHERQD xmm, vm64x, xmm |
GatherVector128(Int32*, Vector256<Int64>, Byte) |
__m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale) VPGATHERQD xmm, vm64y, xmm |
GatherVector128(Int64*, Vector128<Int32>, Byte) |
__m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERDQ xmm, vm32x, xmm |
GatherVector128(Int64*, Vector128<Int64>, Byte) |
__m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERQQ xmm, vm64x, xmm |
GatherVector128(Single*, Vector128<Int32>, Byte) |
__m128 _mm_i32gather_ps (float const* base_addr, __m128i vindex, const int scale) VGATHERDPS xmm, vm32x, xmm |
GatherVector128(Single*, Vector128<Int64>, Byte) |
__m128 _mm_i64gather_ps (float const* base_addr, __m128i vindex, const int scale) VGATHERQPS xmm, vm64x, xmm |
GatherVector128(Single*, Vector256<Int64>, Byte) |
__m128 _mm256_i64gather_ps (float const* base_addr, __m256i vindex, const int scale) VGATHERQPS xmm, vm64y, xmm |
GatherVector128(UInt32*, Vector128<Int32>, Byte) |
__m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale) VPGATHERDD xmm, vm32x, xmm |
GatherVector128(UInt32*, Vector128<Int64>, Byte) |
__m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale) VPGATHERQD xmm, vm64x, xmm |
GatherVector128(UInt32*, Vector256<Int64>, Byte) |
__m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale) VPGATHERQD xmm, vm64y, xmm |
GatherVector128(UInt64*, Vector128<Int32>, Byte) |
__m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERDQ xmm, vm32x, xmm |
GatherVector128(UInt64*, Vector128<Int64>, Byte) |
__m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERQQ xmm, vm64x, xmm |
GatherVector256(Double*, Vector128<Int32>, Byte) |
__m256d _mm256_i32gather_pd (double const* base_addr, __m128i vindex, const int scale) VGATHERDPD ymm, vm32y, ymm |
GatherVector256(Double*, Vector256<Int64>, Byte) |
__m256d _mm256_i64gather_pd (double const* base_addr, __m256i vindex, const int scale) VGATHERQPD ymm, vm64y, ymm |
GatherVector256(Int32*, Vector256<Int32>, Byte) |
__m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale) VPGATHERDD ymm, vm32y, ymm |
GatherVector256(Int64*, Vector128<Int32>, Byte) |
__m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERDQ ymm, vm32y, ymm |
GatherVector256(Int64*, Vector256<Int64>, Byte) |
__m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale) VPGATHERQQ ymm, vm64y, ymm |
GatherVector256(Single*, Vector256<Int32>, Byte) |
__m256 _mm256_i32gather_ps (float const* base_addr, __m256i vindex, const int scale) VGATHERDPS ymm, vm32y, ymm |
GatherVector256(UInt32*, Vector256<Int32>, Byte) |
__m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale) VPGATHERDD ymm, vm32y, ymm |
GatherVector256(UInt64*, Vector128<Int32>, Byte) |
__m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale) VPGATHERDQ ymm, vm32y, ymm |
GatherVector256(UInt64*, Vector256<Int64>, Byte) |
__m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale) VPGATHERQQ ymm, vm64y, ymm |
GetHashCode() |
Serves as the default hash function. (Inherited from Object) |
GetType() |
Gets the Type of the current instance. (Inherited from Object) |
HorizontalAdd(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_hadd_epi16 (__m256i a, __m256i b) VPHADDW ymm, ymm, ymm/m256 |
HorizontalAdd(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_hadd_epi32 (__m256i a, __m256i b) VPHADDD ymm, ymm, ymm/m256 |
HorizontalAddSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_hadds_epi16 (__m256i a, __m256i b) VPHADDSW ymm, ymm, ymm/m256 |
HorizontalSubtract(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_hsub_epi16 (__m256i a, __m256i b) VPHSUBW ymm, ymm, ymm/m256 |
HorizontalSubtract(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_hsub_epi32 (__m256i a, __m256i b) VPHSUBD ymm, ymm, ymm/m256 |
HorizontalSubtractSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_hsubs_epi16 (__m256i a, __m256i b) VPHSUBSW ymm, ymm, ymm/m256 |
InsertVector128(Vector256<Byte>, Vector128<Byte>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<Int16>, Vector128<Int16>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<Int32>, Vector128<Int32>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<Int64>, Vector128<Int64>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<SByte>, Vector128<SByte>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<UInt16>, Vector128<UInt16>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<UInt32>, Vector128<UInt32>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
InsertVector128(Vector256<UInt64>, Vector128<UInt64>, Byte) |
__m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8) VINSERTI128 ymm, ymm, xmm, imm8 |
LoadAlignedVector256NonTemporal(Byte*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(Int16*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(Int32*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(Int64*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(SByte*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(UInt16*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(UInt32*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
LoadAlignedVector256NonTemporal(UInt64*) |
__m256i _mm256_stream_load_si256 (__m256i const* mem_addr) VMOVNTDQA ymm, m256 |
MaskLoad(Int32*, Vector128<Int32>) |
__m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask) VPMASKMOVD xmm, xmm, m128 |
MaskLoad(Int32*, Vector256<Int32>) |
__m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask) VPMASKMOVD ymm, ymm, m256 |
MaskLoad(Int64*, Vector128<Int64>) |
__m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask) VPMASKMOVQ xmm, xmm, m128 |
MaskLoad(Int64*, Vector256<Int64>) |
__m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask) VPMASKMOVQ ymm, ymm, m256 |
MaskLoad(UInt32*, Vector128<UInt32>) |
__m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask) VPMASKMOVD xmm, xmm, m128 |
MaskLoad(UInt32*, Vector256<UInt32>) |
__m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask) VPMASKMOVD ymm, ymm, m256 |
MaskLoad(UInt64*, Vector128<UInt64>) |
__m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask) VPMASKMOVQ xmm, xmm, m128 |
MaskLoad(UInt64*, Vector256<UInt64>) |
__m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask) VPMASKMOVQ ymm, ymm, m256 |
MaskStore(Int32*, Vector128<Int32>, Vector128<Int32>) |
void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a) VPMASKMOVD m128, xmm, xmm |
MaskStore(Int32*, Vector256<Int32>, Vector256<Int32>) |
void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a) VPMASKMOVD m256, ymm, ymm |
MaskStore(Int64*, Vector128<Int64>, Vector128<Int64>) |
void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a) VPMASKMOVQ m128, xmm, xmm |
MaskStore(Int64*, Vector256<Int64>, Vector256<Int64>) |
void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a) VPMASKMOVQ m256, ymm, ymm |
MaskStore(UInt32*, Vector128<UInt32>, Vector128<UInt32>) |
void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a) VPMASKMOVD m128, xmm, xmm |
MaskStore(UInt32*, Vector256<UInt32>, Vector256<UInt32>) |
void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a) VPMASKMOVD m256, ymm, ymm |
MaskStore(UInt64*, Vector128<UInt64>, Vector128<UInt64>) |
void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a) VPMASKMOVQ m128, xmm, xmm |
MaskStore(UInt64*, Vector256<UInt64>, Vector256<UInt64>) |
void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a) VPMASKMOVQ m256, ymm, ymm |
Max(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_max_epu8 (__m256i a, __m256i b) VPMAXUB ymm, ymm, ymm/m256 |
Max(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_max_epi16 (__m256i a, __m256i b) VPMAXSW ymm, ymm, ymm/m256 |
Max(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_max_epi32 (__m256i a, __m256i b) VPMAXSD ymm, ymm, ymm/m256 |
Max(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_max_epi8 (__m256i a, __m256i b) VPMAXSB ymm, ymm, ymm/m256 |
Max(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_max_epu16 (__m256i a, __m256i b) VPMAXUW ymm, ymm, ymm/m256 |
Max(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_max_epu32 (__m256i a, __m256i b) VPMAXUD ymm, ymm, ymm/m256 |
MemberwiseClone() |
Creates a shallow copy of the current Object. (Inherited from Object) |
Min(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_min_epu8 (__m256i a, __m256i b) VPMINUB ymm, ymm, ymm/m256 |
Min(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_min_epi16 (__m256i a, __m256i b) VPMINSW ymm, ymm, ymm/m256 |
Min(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_min_epi32 (__m256i a, __m256i b) VPMINSD ymm, ymm, ymm/m256 |
Min(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_min_epi8 (__m256i a, __m256i b) VPMINSB ymm, ymm, ymm/m256 |
Min(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_min_epu16 (__m256i a, __m256i b) VPMINUW ymm, ymm, ymm/m256 |
Min(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_min_epu32 (__m256i a, __m256i b) VPMINUD ymm, ymm, ymm/m256 |
MoveMask(Vector256<Byte>) |
int _mm256_movemask_epi8 (__m256i a) VPMOVMSKB reg, ymm |
MoveMask(Vector256<SByte>) |
int _mm256_movemask_epi8 (__m256i a) VPMOVMSKB reg, ymm |
MultipleSumAbsoluteDifferences(Vector256<Byte>, Vector256<Byte>, Byte) |
__m256i _mm256_mpsadbw_epu8 (__m256i a, __m256i b, const int imm8) VMPSADBW ymm, ymm, ymm/m256, imm8 |
Multiply(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_mul_epi32 (__m256i a, __m256i b) VPMULDQ ymm, ymm, ymm/m256 |
Multiply(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_mul_epu32 (__m256i a, __m256i b) VPMULUDQ ymm, ymm, ymm/m256 |
MultiplyAddAdjacent(Vector256<Byte>, Vector256<SByte>) |
__m256i _mm256_maddubs_epi16 (__m256i a, __m256i b) VPMADDUBSW ymm, ymm, ymm/m256 |
MultiplyAddAdjacent(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_madd_epi16 (__m256i a, __m256i b) VPMADDWD ymm, ymm, ymm/m256 |
MultiplyHigh(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_mulhi_epi16 (__m256i a, __m256i b) VPMULHW ymm, ymm, ymm/m256 |
MultiplyHigh(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_mulhi_epu16 (__m256i a, __m256i b) VPMULHUW ymm, ymm, ymm/m256 |
MultiplyHighRoundScale(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_mulhrs_epi16 (__m256i a, __m256i b) VPMULHRSW ymm, ymm, ymm/m256 |
MultiplyLow(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_mullo_epi16 (__m256i a, __m256i b) VPMULLW ymm, ymm, ymm/m256 |
MultiplyLow(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_mullo_epi32 (__m256i a, __m256i b) VPMULLD ymm, ymm, ymm/m256 |
MultiplyLow(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_mullo_epi16 (__m256i a, __m256i b) VPMULLW ymm, ymm, ymm/m256 |
MultiplyLow(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_mullo_epi32 (__m256i a, __m256i b) VPMULLD ymm, ymm, ymm/m256 |
Or(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
Or(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_or_si256 (__m256i a, __m256i b) VPOR ymm, ymm, ymm/m256 |
PackSignedSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_packs_epi16 (__m256i a, __m256i b) VPACKSSWB ymm, ymm, ymm/m256 |
PackSignedSaturate(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_packs_epi32 (__m256i a, __m256i b) VPACKSSDW ymm, ymm, ymm/m256 |
PackUnsignedSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_packus_epi16 (__m256i a, __m256i b) VPACKUSWB ymm, ymm, ymm/m256 |
PackUnsignedSaturate(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_packus_epi32 (__m256i a, __m256i b) VPACKUSDW ymm, ymm, ymm/m256 |
Permute2x128(Vector256<Byte>, Vector256<Byte>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<Int16>, Vector256<Int16>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<Int32>, Vector256<Int32>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<Int64>, Vector256<Int64>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<SByte>, Vector256<SByte>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<UInt16>, Vector256<UInt16>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<UInt32>, Vector256<UInt32>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute2x128(Vector256<UInt64>, Vector256<UInt64>, Byte) |
__m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8) VPERM2I128 ymm, ymm, ymm/m256, imm8 |
Permute4x64(Vector256<Double>, Byte) |
__m256d _mm256_permute4x64_pd (__m256d a, const int imm8) VPERMPD ymm, ymm/m256, imm8 |
Permute4x64(Vector256<Int64>, Byte) |
__m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8) VPERMQ ymm, ymm/m256, imm8 |
Permute4x64(Vector256<UInt64>, Byte) |
__m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8) VPERMQ ymm, ymm/m256, imm8 |
PermuteVar8x32(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx) VPERMD ymm, ymm/m256, ymm |
PermuteVar8x32(Vector256<Single>, Vector256<Int32>) |
__m256 _mm256_permutevar8x32_ps (__m256 a, __m256i idx) VPERMPS ymm, ymm/m256, ymm |
PermuteVar8x32(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx) VPERMD ymm, ymm/m256, ymm |
ShiftLeftLogical(Vector256<Int16>, Byte) |
__m256i _mm256_slli_epi16 (__m256i a, int imm8) VPSLLW ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<Int16>, Vector128<Int16>) |
__m256i _mm256_sll_epi16 (__m256i a, __m128i count) VPSLLW ymm, ymm, xmm/m128 |
ShiftLeftLogical(Vector256<Int32>, Byte) |
__m256i _mm256_slli_epi32 (__m256i a, int imm8) VPSLLD ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<Int32>, Vector128<Int32>) |
__m256i _mm256_sll_epi32 (__m256i a, __m128i count) VPSLLD ymm, ymm, xmm/m128 |
ShiftLeftLogical(Vector256<Int64>, Byte) |
__m256i _mm256_slli_epi64 (__m256i a, int imm8) VPSLLQ ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<Int64>, Vector128<Int64>) |
__m256i _mm256_sll_epi64 (__m256i a, __m128i count) VPSLLQ ymm, ymm, xmm/m128 |
ShiftLeftLogical(Vector256<UInt16>, Byte) |
__m256i _mm256_slli_epi16 (__m256i a, int imm8) VPSLLW ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<UInt16>, Vector128<UInt16>) |
__m256i _mm256_sll_epi16 (__m256i a, __m128i count) VPSLLW ymm, ymm, xmm/m128 |
ShiftLeftLogical(Vector256<UInt32>, Byte) |
__m256i _mm256_slli_epi32 (__m256i a, int imm8) VPSLLD ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<UInt32>, Vector128<UInt32>) |
__m256i _mm256_sll_epi32 (__m256i a, __m128i count) VPSLLD ymm, ymm, xmm/m128 |
ShiftLeftLogical(Vector256<UInt64>, Byte) |
__m256i _mm256_slli_epi64 (__m256i a, int imm8) VPSLLQ ymm, ymm, imm8 |
ShiftLeftLogical(Vector256<UInt64>, Vector128<UInt64>) |
__m256i _mm256_sll_epi64 (__m256i a, __m128i count) VPSLLQ ymm, ymm, xmm/m128 |
ShiftLeftLogical128BitLane(Vector256<Byte>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<Int16>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<Int32>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<Int64>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<SByte>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<UInt16>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<UInt32>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogical128BitLane(Vector256<UInt64>, Byte) |
__m256i _mm256_bslli_epi128 (__m256i a, const int imm8) VPSLLDQ ymm, ymm, imm8 |
ShiftLeftLogicalVariable(Vector128<Int32>, Vector128<UInt32>) |
__m128i _mm_sllv_epi32 (__m128i a, __m128i count) VPSLLVD xmm, xmm, xmm/m128 |
ShiftLeftLogicalVariable(Vector128<Int64>, Vector128<UInt64>) |
__m128i _mm_sllv_epi64 (__m128i a, __m128i count) VPSLLVQ xmm, xmm, xmm/m128 |
ShiftLeftLogicalVariable(Vector128<UInt32>, Vector128<UInt32>) |
__m128i _mm_sllv_epi32 (__m128i a, __m128i count) VPSLLVD xmm, xmm, xmm/m128 |
ShiftLeftLogicalVariable(Vector128<UInt64>, Vector128<UInt64>) |
__m128i _mm_sllv_epi64 (__m128i a, __m128i count) VPSLLVQ xmm, xmm, xmm/m128 |
ShiftLeftLogicalVariable(Vector256<Int32>, Vector256<UInt32>) |
__m256i _mm256_sllv_epi32 (__m256i a, __m256i count) VPSLLVD ymm, ymm, ymm/m256 |
ShiftLeftLogicalVariable(Vector256<Int64>, Vector256<UInt64>) |
__m256i _mm256_sllv_epi64 (__m256i a, __m256i count) VPSLLVQ ymm, ymm, ymm/m256 |
ShiftLeftLogicalVariable(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_sllv_epi32 (__m256i a, __m256i count) VPSLLVD ymm, ymm, ymm/m256 |
ShiftLeftLogicalVariable(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_sllv_epi64 (__m256i a, __m256i count) VPSLLVQ ymm, ymm, ymm/m256 |
ShiftRightArithmetic(Vector256<Int16>, Byte) |
__m256i _mm256_srai_epi16 (__m256i a, int imm8) VPSRAW ymm, ymm, imm8 |
ShiftRightArithmetic(Vector256<Int16>, Vector128<Int16>) |
__m256i _mm256_sra_epi16 (__m256i a, __m128i count) VPSRAW ymm, ymm, xmm/m128 |
ShiftRightArithmetic(Vector256<Int32>, Byte) |
__m256i _mm256_srai_epi32 (__m256i a, int imm8) VPSRAD ymm, ymm, imm8 |
ShiftRightArithmetic(Vector256<Int32>, Vector128<Int32>) |
__m256i _mm256_sra_epi32 (__m256i a, __m128i count) VPSRAD ymm, ymm, xmm/m128 |
ShiftRightArithmeticVariable(Vector128<Int32>, Vector128<UInt32>) |
__m128i _mm_srav_epi32 (__m128i a, __m128i count) VPSRAVD xmm, xmm, xmm/m128 |
ShiftRightArithmeticVariable(Vector256<Int32>, Vector256<UInt32>) |
__m256i _mm256_srav_epi32 (__m256i a, __m256i count) VPSRAVD ymm, ymm, ymm/m256 |
ShiftRightLogical(Vector256<Int16>, Byte) |
__m256i _mm256_srli_epi16 (__m256i a, int imm8) VPSRLW ymm, ymm, imm8 |
ShiftRightLogical(Vector256<Int16>, Vector128<Int16>) |
__m256i _mm256_srl_epi16 (__m256i a, __m128i count) VPSRLW ymm, ymm, xmm/m128 |
ShiftRightLogical(Vector256<Int32>, Byte) |
__m256i _mm256_srli_epi32 (__m256i a, int imm8) VPSRLD ymm, ymm, imm8 |
ShiftRightLogical(Vector256<Int32>, Vector128<Int32>) |
__m256i _mm256_srl_epi32 (__m256i a, __m128i count) VPSRLD ymm, ymm, xmm/m128 |
ShiftRightLogical(Vector256<Int64>, Byte) |
__m256i _mm256_srli_epi64 (__m256i a, int imm8) VPSRLQ ymm, ymm, imm8 |
ShiftRightLogical(Vector256<Int64>, Vector128<Int64>) |
__m256i _mm256_srl_epi64 (__m256i a, __m128i count) VPSRLQ ymm, ymm, xmm/m128 |
ShiftRightLogical(Vector256<UInt16>, Byte) |
__m256i _mm256_srli_epi16 (__m256i a, int imm8) VPSRLW ymm, ymm, imm8 |
ShiftRightLogical(Vector256<UInt16>, Vector128<UInt16>) |
__m256i _mm256_srl_epi16 (__m256i a, __m128i count) VPSRLW ymm, ymm, xmm/m128 |
ShiftRightLogical(Vector256<UInt32>, Byte) |
__m256i _mm256_srli_epi32 (__m256i a, int imm8) VPSRLD ymm, ymm, imm8 |
ShiftRightLogical(Vector256<UInt32>, Vector128<UInt32>) |
__m256i _mm256_srl_epi32 (__m256i a, __m128i count) VPSRLD ymm, ymm, xmm/m128 |
ShiftRightLogical(Vector256<UInt64>, Byte) |
__m256i _mm256_srli_epi64 (__m256i a, int imm8) VPSRLQ ymm, ymm, imm8 |
ShiftRightLogical(Vector256<UInt64>, Vector128<UInt64>) |
__m256i _mm256_srl_epi64 (__m256i a, __m128i count) VPSRLQ ymm, ymm, xmm/m128 |
ShiftRightLogical128BitLane(Vector256<Byte>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<Int16>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<Int32>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<Int64>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<SByte>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<UInt16>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<UInt32>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogical128BitLane(Vector256<UInt64>, Byte) |
__m256i _mm256_bsrli_epi128 (__m256i a, const int imm8) VPSRLDQ ymm, ymm, imm8 |
ShiftRightLogicalVariable(Vector128<Int32>, Vector128<UInt32>) |
__m128i _mm_srlv_epi32 (__m128i a, __m128i count) VPSRLVD xmm, xmm, xmm/m128 |
ShiftRightLogicalVariable(Vector128<Int64>, Vector128<UInt64>) |
__m128i _mm_srlv_epi64 (__m128i a, __m128i count) VPSRLVQ xmm, xmm, xmm/m128 |
ShiftRightLogicalVariable(Vector128<UInt32>, Vector128<UInt32>) |
__m128i _mm_srlv_epi32 (__m128i a, __m128i count) VPSRLVD xmm, xmm, xmm/m128 |
ShiftRightLogicalVariable(Vector128<UInt64>, Vector128<UInt64>) |
__m128i _mm_srlv_epi64 (__m128i a, __m128i count) VPSRLVQ xmm, xmm, xmm/m128 |
ShiftRightLogicalVariable(Vector256<Int32>, Vector256<UInt32>) |
__m256i _mm256_srlv_epi32 (__m256i a, __m256i count) VPSRLVD ymm, ymm, ymm/m256 |
ShiftRightLogicalVariable(Vector256<Int64>, Vector256<UInt64>) |
__m256i _mm256_srlv_epi64 (__m256i a, __m256i count) VPSRLVQ ymm, ymm, ymm/m256 |
ShiftRightLogicalVariable(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_srlv_epi32 (__m256i a, __m256i count) VPSRLVD ymm, ymm, ymm/m256 |
ShiftRightLogicalVariable(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_srlv_epi64 (__m256i a, __m256i count) VPSRLVQ ymm, ymm, ymm/m256 |
Shuffle(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_shuffle_epi8 (__m256i a, __m256i b) VPSHUFB ymm, ymm, ymm/m256 |
Shuffle(Vector256<Int32>, Byte) |
__m256i _mm256_shuffle_epi32 (__m256i a, const int imm8) VPSHUFD ymm, ymm/m256, imm8 |
Shuffle(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_shuffle_epi8 (__m256i a, __m256i b) VPSHUFB ymm, ymm, ymm/m256 |
Shuffle(Vector256<UInt32>, Byte) |
__m256i _mm256_shuffle_epi32 (__m256i a, const int imm8) VPSHUFD ymm, ymm/m256, imm8 |
ShuffleHigh(Vector256<Int16>, Byte) |
__m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8) VPSHUFHW ymm, ymm/m256, imm8 |
ShuffleHigh(Vector256<UInt16>, Byte) |
__m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8) VPSHUFHW ymm, ymm/m256, imm8 |
ShuffleLow(Vector256<Int16>, Byte) |
__m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8) VPSHUFLW ymm, ymm/m256, imm8 |
ShuffleLow(Vector256<UInt16>, Byte) |
__m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8) VPSHUFLW ymm, ymm/m256, imm8 |
Sign(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_sign_epi16 (__m256i a, __m256i b) VPSIGNW ymm, ymm, ymm/m256 |
Sign(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_sign_epi32 (__m256i a, __m256i b) VPSIGND ymm, ymm, ymm/m256 |
Sign(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_sign_epi8 (__m256i a, __m256i b) VPSIGNB ymm, ymm, ymm/m256 |
Subtract(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_sub_epi8 (__m256i a, __m256i b) VPSUBB ymm, ymm, ymm/m256 |
Subtract(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_sub_epi16 (__m256i a, __m256i b) VPSUBW ymm, ymm, ymm/m256 |
Subtract(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_sub_epi32 (__m256i a, __m256i b) VPSUBD ymm, ymm, ymm/m256 |
Subtract(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_sub_epi64 (__m256i a, __m256i b) VPSUBQ ymm, ymm, ymm/m256 |
Subtract(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_sub_epi8 (__m256i a, __m256i b) VPSUBB ymm, ymm, ymm/m256 |
Subtract(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_sub_epi16 (__m256i a, __m256i b) VPSUBW ymm, ymm, ymm/m256 |
Subtract(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_sub_epi32 (__m256i a, __m256i b) VPSUBD ymm, ymm, ymm/m256 |
Subtract(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_sub_epi64 (__m256i a, __m256i b) VPSUBQ ymm, ymm, ymm/m256 |
SubtractSaturate(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_subs_epu8 (__m256i a, __m256i b) VPSUBUSB ymm, ymm, ymm/m256 |
SubtractSaturate(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_subs_epi16 (__m256i a, __m256i b) VPSUBSW ymm, ymm, ymm/m256 |
SubtractSaturate(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_subs_epi8 (__m256i a, __m256i b) VPSUBSB ymm, ymm, ymm/m256 |
SubtractSaturate(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_subs_epu16 (__m256i a, __m256i b) VPSUBUSW ymm, ymm, ymm/m256 |
SumAbsoluteDifferences(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_sad_epu8 (__m256i a, __m256i b) VPSADBW ymm, ymm, ymm/m256 |
ToString() |
Returns a string that represents the current object. (Inherited from Object) |
UnpackHigh(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b) VPUNPCKHBW ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b) VPUNPCKHWD ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b) VPUNPCKHDQ ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b) VPUNPCKHQDQ ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b) VPUNPCKHBW ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b) VPUNPCKHWD ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b) VPUNPCKHDQ ymm, ymm, ymm/m256 |
UnpackHigh(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b) VPUNPCKHQDQ ymm, ymm, ymm/m256 |
UnpackLow(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b) VPUNPCKLBW ymm, ymm, ymm/m256 |
UnpackLow(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b) VPUNPCKLWD ymm, ymm, ymm/m256 |
UnpackLow(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b) VPUNPCKLDQ ymm, ymm, ymm/m256 |
UnpackLow(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b) VPUNPCKLQDQ ymm, ymm, ymm/m256 |
UnpackLow(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b) VPUNPCKLBW ymm, ymm, ymm/m256 |
UnpackLow(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b) VPUNPCKLWD ymm, ymm, ymm/m256 |
UnpackLow(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b) VPUNPCKLDQ ymm, ymm, ymm/m256 |
UnpackLow(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b) VPUNPCKLQDQ ymm, ymm, ymm/m256 |
Xor(Vector256<Byte>, Vector256<Byte>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<Int16>, Vector256<Int16>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<Int32>, Vector256<Int32>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<SByte>, Vector256<SByte>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<UInt16>, Vector256<UInt16>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<UInt32>, Vector256<UInt32>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
Xor(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_xor_si256 (__m256i a, __m256i b) VPXOR ymm, ymm, ymm/m256 |
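As a closing illustration of how several of the methods above compose (a sketch, not part of the reference table), the snippet below uses CompareEqual (VPCMPEQB) and MoveMask (VPMOVMSKB) to find the first occurrence of a byte, 32 bytes per iteration, falling back to a scalar scan for the remainder or when AVX2 is unavailable. The class and method names are illustrative, and the code assumes compilation with unsafe code enabled.

using System.Numerics;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class Avx2IndexOfExample
{
    // Returns the index of the first occurrence of 'value' in 'data', or -1 if it is not found.
    public static unsafe int IndexOf(byte[] data, byte value)
    {
        int i = 0;
        if (Avx2.IsSupported)
        {
            Vector256<byte> needle = Vector256.Create(value); // broadcast the target byte to all 32 lanes
            fixed (byte* p = data)
            {
                for (; i <= data.Length - Vector256<byte>.Count; i += Vector256<byte>.Count)
                {
                    Vector256<byte> eq = Avx2.CompareEqual(Avx.LoadVector256(p + i), needle);
                    int mask = Avx2.MoveMask(eq); // one bit per byte lane
                    if (mask != 0)
                    {
                        return i + BitOperations.TrailingZeroCount(mask);
                    }
                }
            }
        }
        for (; i < data.Length; i++) // remainder, and fallback when AVX2 is unavailable
        {
            if (data[i] == value) return i;
        }
        return -1;
    }
}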