

Sse2 Class

Definition

Important

This API is not CLS-compliant.

This class provides access to Intel SSE2 hardware instructions via intrinsics.

public ref class Sse2 abstract : System::Runtime::Intrinsics::X86::Sse
[System.CLSCompliant(false)]
public abstract class Sse2 : System.Runtime.Intrinsics.X86.Sse
[<System.CLSCompliant(false)>]
type Sse2 = class
    inherit Sse
Public MustInherit Class Sse2
Inherits Sse
Inheritance
Object
Sse
Sse2
Derived
Attributes

Properties

IsSupported

Gets a value that indicates whether the SSE2 instructions exposed by this class are supported on the current hardware.
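
For illustration only (a minimal sketch, not part of this reference): every call into this class is normally guarded on IsSupported, because calling an intrinsic on hardware that lacks SSE2 throws PlatformNotSupportedException at run time.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class AddFourInts
{
    // Adds four Int32 lanes with PADDD when SSE2 is available; otherwise falls back to scalar code.
    public static Vector128<int> Add(Vector128<int> left, Vector128<int> right)
    {
        if (Sse2.IsSupported)
        {
            return Sse2.Add(left, right);
        }

        // Software fallback for platforms without SSE2.
        return Vector128.Create(
            left.GetElement(0) + right.GetElement(0),
            left.GetElement(1) + right.GetElement(1),
            left.GetElement(2) + right.GetElement(2),
            left.GetElement(3) + right.GetElement(3));
    }
}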

Methods

Add(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_add_epi8 (__m128i a, __m128i b)

PADDB xmm, xmm/m128

Add(Vector128<Double>, Vector128<Double>)

__m128d _mm_add_pd (__m128d a, __m128d b)

ADDPD xmm, xmm/m128

Add(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_add_epi16 (__m128i a, __m128i b)

PADDW xmm, xmm/m128

Add(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_add_epi32 (__m128i a, __m128i b)

PADDD xmm, xmm/m128

Add(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_add_epi64 (__m128i a, __m128i b)

PADDQ xmm, xmm/m128

Add(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_add_epi8 (__m128i a, __m128i b)

PADDB xmm, xmm/m128

Add(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_add_epi16 (__m128i a, __m128i b)

PADDW xmm, xmm/m128

Add(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_add_epi32 (__m128i a, __m128i b)

PADDD xmm, xmm/m128

Add(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_add_epi64 (__m128i a, __m128i b)

PADDQ xmm, xmm/m128

AddSaturate(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_adds_epu8 (__m128i a, __m128i b)

PADDUSB xmm, xmm/m128

AddSaturate(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_adds_epi16 (__m128i a, __m128i b)

PADDSW xmm, xmm/m128

AddSaturate(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_adds_epi8 (__m128i a, __m128i b)

PADDSB xmm, xmm/m128

AddSaturate(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_adds_epu16 (__m128i a, __m128i b)

PADDUSW xmm, xmm/m128
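
As a hedged illustration with made-up values (assumes an SSE2-capable machine): Add wraps around on overflow, while AddSaturate clamps the result to the element type's range.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class SaturationDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<byte> big = Vector128.Create((byte)250);
        Vector128<byte> ten = Vector128.Create((byte)10);

        Vector128<byte> wrapped   = Sse2.Add(big, ten);          // PADDB: 250 + 10 wraps to 4
        Vector128<byte> saturated = Sse2.AddSaturate(big, ten);  // PADDUSB: clamps to 255

        Console.WriteLine(wrapped.GetElement(0));    // 4
        Console.WriteLine(saturated.GetElement(0));  // 255
    }
}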

AddScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_add_sd (__m128d a, __m128d b)

ADDSD xmm, xmm/m64

And(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<Double>, Vector128<Double>)

__m128d _mm_and_pd (__m128d a, __m128d b)

ANDPD xmm, xmm/m128

And(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

And(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_and_si128 (__m128i a, __m128i b)

PAND xmm, xmm/m128

AndNot(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<Double>, Vector128<Double>)

__m128d _mm_andnot_pd (__m128d a, __m128d b)

ANDNPD xmm, xmm/m128

AndNot(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

AndNot(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_andnot_si128 (__m128i a, __m128i b)

PANDN xmm, xmm/m128

Average(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_avg_epu8 (__m128i a, __m128i b)

PAVGB xmm, xmm/m128

Average(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_avg_epu16 (__m128i a, __m128i b)

PAVGW xmm, xmm/m128

CompareEqual(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmpeq_epi8 (__m128i a, __m128i b)

PCMPEQB xmm, xmm/m128

CompareEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpeq_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(0)

CompareEqual(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmpeq_epi16 (__m128i a, __m128i b)

PCMPEQW xmm, xmm/m128

CompareEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpeq_epi32 (__m128i a, __m128i b)

PCMPEQD xmm, xmm/m128

CompareEqual(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmpeq_epi8 (__m128i a, __m128i b)

PCMPEQB xmm, xmm/m128

CompareEqual(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmpeq_epi16 (__m128i a, __m128i b)

PCMPEQW xmm, xmm/m128

CompareEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpeq_epi32 (__m128i a, __m128i b)

PCMPEQD xmm, xmm/m128
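
A small sketch with hypothetical values: the integer CompareEqual overloads return a mask vector in which equal lanes are all ones (which reads back as -1 for Int32) and unequal lanes are zero.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class CompareEqualDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<int> left  = Vector128.Create(1, 2, 3, 4);
        Vector128<int> right = Vector128.Create(1, 0, 3, 0);

        Vector128<int> mask = Sse2.CompareEqual(left, right);  // PCMPEQD

        // Prints: -1 0 -1 0
        for (int i = 0; i < 4; i++)
            Console.Write(mask.GetElement(i) + " ");
    }
}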

CompareGreaterThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpgt_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(6)

CompareGreaterThan(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmpgt_epi16 (__m128i a, __m128i b)

PCMPGTW xmm, xmm/m128

CompareGreaterThan(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpgt_epi32 (__m128i a, __m128i b)

PCMPGTD xmm, xmm/m128

CompareGreaterThan(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmpgt_epi8 (__m128i a, __m128i b)

PCMPGTB xmm, xmm/m128

CompareGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpge_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(5)

CompareLessThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmplt_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(1)

CompareLessThan(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmplt_epi16 (__m128i a, __m128i b)

PCMPGTW xmm, xmm/m128

CompareLessThan(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmplt_epi32 (__m128i a, __m128i b)

PCMPGTD xmm, xmm/m128

CompareLessThan(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmplt_epi8 (__m128i a, __m128i b)

PCMPGTB xmm, xmm/m128

CompareLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmple_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(2)

CompareNotEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpneq_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(4)

CompareNotGreaterThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpngt_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(2)

CompareNotGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnge_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(1)

CompareNotLessThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnlt_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(5)

CompareNotLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnle_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(6)

CompareOrdered(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpord_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(7)

CompareScalarEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpeq_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(0)

CompareScalarGreaterThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpgt_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(6)

CompareScalarGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpge_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(5)

CompareScalarLessThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmplt_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(1)

CompareScalarLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmple_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(2)

CompareScalarNotEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpneq_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(4)

CompareScalarNotGreaterThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpngt_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(2)

CompareScalarNotGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnge_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(1)

CompareScalarNotLessThan(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnlt_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(5)

CompareScalarNotLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpnle_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(6)

CompareScalarOrdered(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpord_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(7)

CompareScalarOrderedEqual(Vector128<Double>, Vector128<Double>)

int _mm_comieq_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64

CompareScalarOrderedGreaterThan(Vector128<Double>, Vector128<Double>)

int _mm_comigt_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64

CompareScalarOrderedGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

int _mm_comige_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64

CompareScalarOrderedLessThan(Vector128<Double>, Vector128<Double>)

int _mm_comilt_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64

CompareScalarOrderedLessThanOrEqual(Vector128<Double>, Vector128<Double>)

int _mm_comile_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64

CompareScalarOrderedNotEqual(Vector128<Double>, Vector128<Double>)

int _mm_comineq_sd (__m128d a, __m128d b)

COMISD xmm, xmm/m64
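
For context, a sketch under the assumption of SSE2 support: unlike the vector comparisons above, the CompareScalarOrdered* overloads compare only the lowest Double element of each operand and return a Boolean.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ScalarCompareDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<double> a = Vector128.Create(2.0, 100.0);
        Vector128<double> b = Vector128.Create(3.0, -100.0);

        // COMISD looks only at element 0, so the upper lanes do not affect the result.
        bool lowerIsLess = Sse2.CompareScalarOrderedLessThan(a, b);  // 2.0 < 3.0 -> true
        Console.WriteLine(lowerIsLess);
    }
}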

CompareScalarUnordered(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpunord_sd (__m128d a, __m128d b)

CMPSD xmm, xmm/m64, imm8(3)

CompareScalarUnorderedEqual(Vector128<Double>, Vector128<Double>)

int _mm_ucomieq_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareScalarUnorderedGreaterThan(Vector128<Double>, Vector128<Double>)

int _mm_ucomigt_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareScalarUnorderedGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

int _mm_ucomige_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareScalarUnorderedLessThan(Vector128<Double>, Vector128<Double>)

int _mm_ucomilt_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareScalarUnorderedLessThanOrEqual(Vector128<Double>, Vector128<Double>)

int _mm_ucomile_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareScalarUnorderedNotEqual(Vector128<Double>, Vector128<Double>)

int _mm_ucomineq_sd (__m128d a, __m128d b)

UCOMISD xmm, xmm/m64

CompareUnordered(Vector128<Double>, Vector128<Double>)

__m128d _mm_cmpunord_pd (__m128d a, __m128d b)

CMPPD xmm, xmm/m128, imm8(3)

ConvertScalarToVector128Double(Vector128<Double>, Int32)

__m128d _mm_cvtsi32_sd (__m128d a, int b)

CVTSI2SD xmm, reg/m32

ConvertScalarToVector128Double(Vector128<Double>, Vector128<Single>)

__m128d _mm_cvtss_sd (__m128d a, __m128 b)

CVTSS2SD xmm, xmm/m32

ConvertScalarToVector128Int32(Int32)

__m128i _mm_cvtsi32_si128 (int a)

MOVD xmm, reg/m32

ConvertScalarToVector128Single(Vector128<Single>, Vector128<Double>)

__m128 _mm_cvtsd_ss (__m128 a, __m128d b)

CVTSD2SS xmm, xmm/m64

ConvertScalarToVector128UInt32(UInt32)

__m128i _mm_cvtsi32_si128 (int a)

MOVD xmm, reg/m32

ConvertToInt32(Vector128<Double>)

int _mm_cvtsd_si32 (__m128d a)

CVTSD2SI r32, xmm/m64

ConvertToInt32(Vector128<Int32>)

int _mm_cvtsi128_si32 (__m128i a)

MOVD reg/m32, xmm

ConvertToInt32WithTruncation(Vector128<Double>)

int _mm_cvttsd_si32 (__m128d a)

CVTTSD2SI reg, xmm/m64

ConvertToUInt32(Vector128<UInt32>)

int _mm_cvtsi128_si32 (__m128i a)

MOVD reg/m32, xmm

ConvertToVector128Double(Vector128<Int32>)

__m128d _mm_cvtepi32_pd (__m128i a)

CVTDQ2PD xmm, xmm/m128

ConvertToVector128Double(Vector128<Single>)

__m128d _mm_cvtps_pd (__m128 a)

CVTPS2PD xmm, xmm/m128

ConvertToVector128Int32(Vector128<Double>)

__m128i _mm_cvtpd_epi32 (__m128d a)

CVTPD2DQ xmm, xmm/m128

ConvertToVector128Int32(Vector128<Single>)

__m128i _mm_cvtps_epi32 (__m128 a)

CVTPS2DQ xmm, xmm/m128

ConvertToVector128Int32WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epi32 (__m128d a)

CVTTPD2DQ xmm, xmm/m128

ConvertToVector128Int32WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epi32 (__m128 a)

CVTTPS2DQ xmm, xmm/m128

ConvertToVector128Single(Vector128<Double>)

__m128 _mm_cvtpd_ps (__m128d a)

CVTPD2PS xmm, xmm/m128

ConvertToVector128Single(Vector128<Int32>)

__m128 _mm_cvtepi32_ps (__m128i a)

CVTDQ2PS xmm, xmm/m128
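
A brief sketch of the conversion helpers with made-up values (assumes SSE2 support): ConvertToVector128Int32 rounds using the current rounding mode (round-to-nearest-even by default), while the WithTruncation variant truncates toward zero.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ConvertDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<double> values = Vector128.Create(1.7, -1.7);

        Vector128<int> rounded   = Sse2.ConvertToVector128Int32(values);               // CVTPD2DQ: 2, -2
        Vector128<int> truncated = Sse2.ConvertToVector128Int32WithTruncation(values); // CVTTPD2DQ: 1, -1

        Console.WriteLine($"{rounded.GetElement(0)}, {rounded.GetElement(1)}");
        Console.WriteLine($"{truncated.GetElement(0)}, {truncated.GetElement(1)}");
    }
}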

Divide(Vector128<Double>, Vector128<Double>)

__m128d _mm_div_pd (__m128d a, __m128d b)

DIVPD xmm, xmm/m128

DivideScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_div_sd (__m128d a, __m128d b)

DIVSD xmm, xmm/m64

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)
Extract(Vector128<UInt16>, Byte)

int _mm_extract_epi16 (__m128i a, int immediate)

PEXTRW reg, xmm, imm8

GetHashCode()

Serves as the default hash function.

(Inherited from Object)
GetType()

Gets the Type of the current instance.

(Inherited from Object)
Insert(Vector128<Int16>, Int16, Byte)

__m128i _mm_insert_epi16 (__m128i a, int i, int immediate)

PINSRW xmm, reg/m16, imm8

Insert(Vector128<UInt16>, UInt16, Byte)

__m128i _mm_insert_epi16 (__m128i a, int i, int immediate)

PINSRW xmm, reg/m16, imm8
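
As a minimal sketch with hypothetical values: Extract reads a single 16-bit lane into an integer, and Insert returns a copy of the vector with one lane replaced; the lane index is typically supplied as a constant.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ExtractInsertDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<ushort> v = Vector128.Create((ushort)0, 1, 2, 3, 4, 5, 6, 7);

        ushort third = Sse2.Extract(v, 3);            // PEXTRW: reads lane 3 -> 3
        Vector128<ushort> w = Sse2.Insert(v, 99, 3);  // PINSRW: lane 3 becomes 99

        Console.WriteLine(third);                     // 3
        Console.WriteLine(Sse2.Extract(w, 3));        // 99
    }
}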

LoadAlignedVector128(Byte*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(Double*)

__m128d _mm_load_pd (double const* mem_address)

MOVAPD xmm, m128

LoadAlignedVector128(Int16*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(Int32*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(Int64*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(SByte*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(UInt16*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(UInt32*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadAlignedVector128(UInt64*)

__m128i _mm_load_si128 (__m128i const* mem_address)

MOVDQA xmm, m128

LoadFence()

void _mm_lfence(void)

LFENCE

LoadHigh(Vector128<Double>, Double*)

__m128d _mm_loadh_pd (__m128d a, double const* mem_addr)

MOVHPD xmm, m64

LoadLow(Vector128<Double>, Double*)

__m128d _mm_loadl_pd (__m128d a, double const* mem_addr)

MOVLPD xmm, m64

LoadScalarVector128(Double*)

__m128d _mm_load_sd (double const* mem_address)

MOVSD xmm, m64

LoadScalarVector128(Int32*)

__m128i _mm_loadu_si32 (void const* mem_addr)

MOVD xmm, reg/m32

LoadScalarVector128(Int64*)

__m128i _mm_loadl_epi64 (__m128i const* mem_addr)

MOVQ xmm, reg/m64

LoadScalarVector128(UInt32*)

__m128i _mm_loadu_si32 (void const* mem_addr)

MOVD xmm, reg/m32

LoadScalarVector128(UInt64*)

__m128i _mm_loadl_epi64 (__m128i const* mem_addr)

MOVQ xmm, reg/m64

LoadVector128(Byte*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(Double*)

__m128d _mm_loadu_pd (double const* mem_address)

MOVUPD xmm, m128

LoadVector128(Int16*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(Int32*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(Int64*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(SByte*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(UInt16*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(UInt32*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128

LoadVector128(UInt64*)

__m128i _mm_loadu_si128 (__m128i const* mem_address)

MOVDQU xmm, m128
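
A hedged sketch of the pointer-based loads (requires compiling with unsafe code enabled): LoadVector128 performs an unaligned 128-bit read, whereas the LoadAlignedVector128 overloads above require a 16-byte-aligned address.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class LoadDemo
{
    static unsafe void Main()
    {
        if (!Sse2.IsSupported) return;

        byte[] data = new byte[16];
        for (int i = 0; i < data.Length; i++) data[i] = (byte)i;

        fixed (byte* p = data)
        {
            Vector128<byte> v = Sse2.LoadVector128(p);  // MOVDQU: no alignment requirement
            Console.WriteLine(v.GetElement(15));        // 15
        }
    }
}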

MaskMove(Vector128<Byte>, Vector128<Byte>, Byte*)

void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address)

MASKMOVDQU xmm, xmm

MaskMove(Vector128<SByte>, Vector128<SByte>, SByte*)

void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address)

MASKMOVDQU xmm, xmm

Max(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_max_epu8 (__m128i a, __m128i b)

PMAXUB xmm, xmm/m128

Max(Vector128<Double>, Vector128<Double>)

__m128d _mm_max_pd (__m128d a, __m128d b)

MAXPD xmm, xmm/m128

Max(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_max_epi16 (__m128i a, __m128i b)

PMAXSW xmm, xmm/m128

MaxScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_max_sd (__m128d a, __m128d b)

MAXSD xmm, xmm/m64

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)
MemoryFence()

void _mm_mfence(void)

MFENCE

Min(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_min_epu8 (__m128i a, __m128i b)

PMINUB xmm, xmm/m128

Min(Vector128<Double>, Vector128<Double>)

__m128d _mm_min_pd (__m128d a, __m128d b)

MINPD xmm, xmm/m128

Min(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_min_epi16 (__m128i a, __m128i b)

PMINSW xmm, xmm/m128

MinScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_min_sd (__m128d a, __m128d b)

MINSD xmm, xmm/m64

MoveMask(Vector128<Byte>)

int _mm_movemask_epi8 (__m128i a)

PMOVMSKB reg, xmm

MoveMask(Vector128<Double>)

int _mm_movemask_pd (__m128d a)

MOVMSKPD reg, xmm

MoveMask(Vector128<SByte>)

int _mm_movemask_epi8 (__m128i a)

PMOVMSKB reg, xmm
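
For illustration, a common pattern sketched with made-up data (assumes SSE2 support): combining CompareEqual with MoveMask condenses a per-byte comparison into a 16-bit scalar mask that can be scanned with bit tricks.

using System;
using System.Numerics;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class MoveMaskDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<byte> haystack = Vector128.Create(
            (byte)'a', (byte)'b', (byte)'c', (byte)'x',
            (byte)'d', (byte)'e', (byte)'x', (byte)'f',
            (byte)'g', (byte)'h', (byte)'i', (byte)'j',
            (byte)'k', (byte)'l', (byte)'m', (byte)'n');
        Vector128<byte> needle = Vector128.Create((byte)'x');

        int mask = Sse2.MoveMask(Sse2.CompareEqual(haystack, needle));  // PCMPEQB + PMOVMSKB
        int firstMatch = BitOperations.TrailingZeroCount(mask);         // index of the first 'x'

        Console.WriteLine(Convert.ToString(mask, 2));  // 1001000 (bits 3 and 6 set)
        Console.WriteLine(firstMatch);                 // 3
    }
}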

MoveScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_move_sd (__m128d a, __m128d b)

MOVSD xmm, xmm

MoveScalar(Vector128<Int64>)

__m128i _mm_move_epi64 (__m128i a)

MOVQ xmm, xmm

MoveScalar(Vector128<UInt64>)

__m128i _mm_move_epi64 (__m128i a)

MOVQ xmm, xmm

Multiply(Vector128<Double>, Vector128<Double>)

__m128d _mm_mul_pd (__m128d a, __m128d b)

MULPD xmm, xmm/m128

Multiply(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mul_epu32 (__m128i a, __m128i b)

PMULUDQ xmm, xmm/m128

MultiplyAddAdjacent(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_madd_epi16 (__m128i a, __m128i b)

PMADDWD xmm, xmm/m128

MultiplyHigh(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mulhi_epi16 (__m128i a, __m128i b)

PMULHW xmm, xmm/m128

MultiplyHigh(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mulhi_epu16 (__m128i a, __m128i b)

PMULHUW xmm, xmm/m128

MultiplyLow(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mullo_epi16 (__m128i a, __m128i b)

PMULLW xmm, xmm/m128

MultiplyLow(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mullo_epi16 (__m128i a, __m128i b)

PMULLW xmm, xmm/m128

MultiplyScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_mul_sd (__m128d a, __m128d b)

MULSD xmm, xmm/m64

Or(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<Double>, Vector128<Double>)

__m128d _mm_or_pd (__m128d a, __m128d b)

ORPD xmm, xmm/m128

Or(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

Or(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_or_si128 (__m128i a, __m128i b)

POR xmm, xmm/m128

PackSignedSaturate(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_packs_epi16 (__m128i a, __m128i b)

PACKSSWB xmm, xmm/m128

PackSignedSaturate(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_packs_epi32 (__m128i a, __m128i b)

PACKSSDW xmm, xmm/m128

PackUnsignedSaturate(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_packus_epi16 (__m128i a, __m128i b)

PACKUSWB xmm, xmm/m128

ShiftLeftLogical(Vector128<Int16>, Byte)

__m128i _mm_slli_epi16 (__m128i a, int immediate)

PSLLW xmm, imm8

ShiftLeftLogical(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_sll_epi16 (__m128i a, __m128i count)

PSLLW xmm, xmm/m128

ShiftLeftLogical(Vector128<Int32>, Byte)

__m128i _mm_slli_epi32 (__m128i a, int immediate)

PSLLD xmm, imm8

ShiftLeftLogical(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_sll_epi32 (__m128i a, __m128i count)

PSLLD xmm, xmm/m128

ShiftLeftLogical(Vector128<Int64>, Byte)

__m128i _mm_slli_epi64 (__m128i a, int immediate)

PSLLQ xmm, imm8

ShiftLeftLogical(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_sll_epi64 (__m128i a, __m128i count)

PSLLQ xmm, xmm/m128

ShiftLeftLogical(Vector128<UInt16>, Byte)

__m128i _mm_slli_epi16 (__m128i a, int immediate)

PSLLW xmm, imm8

ShiftLeftLogical(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_sll_epi16 (__m128i a, __m128i count)

PSLLW xmm, xmm/m128

ShiftLeftLogical(Vector128<UInt32>, Byte)

__m128i _mm_slli_epi32 (__m128i a, int immediate)

PSLLD xmm, imm8

ShiftLeftLogical(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_sll_epi32 (__m128i a, __m128i count)

PSLLD xmm, xmm/m128

ShiftLeftLogical(Vector128<UInt64>, Byte)

__m128i _mm_slli_epi64 (__m128i a, int immediate)

PSLLQ xmm, imm8

ShiftLeftLogical(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_sll_epi64 (__m128i a, __m128i count)

PSLLQ xmm, xmm/m128
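
A short sketch with hypothetical values: the Byte overloads shift every lane by an immediate count, while the Vector128 overloads take the shift count from the low 64 bits of the second operand.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShiftDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<uint> v = Vector128.Create(1u, 2u, 4u, 8u);

        Vector128<uint> byImmediate = Sse2.ShiftLeftLogical(v, 4);                             // PSLLD xmm, imm8
        Vector128<uint> byVector    = Sse2.ShiftLeftLogical(v, Vector128.Create(4u, 0, 0, 0)); // PSLLD xmm, xmm

        Console.WriteLine(byImmediate.GetElement(3));  // 8 << 4 = 128
        Console.WriteLine(byVector.GetElement(3));     // 128
    }
}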

ShiftLeftLogical128BitLane(Vector128<Byte>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<Int16>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<Int32>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<Int64>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<SByte>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<UInt16>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<UInt32>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftLeftLogical128BitLane(Vector128<UInt64>, Byte)

__m128i _mm_bslli_si128 (__m128i a, int imm8)

PSLLDQ xmm, imm8

ShiftRightArithmetic(Vector128<Int16>, Byte)

__m128i _mm_srai_epi16 (__m128i a, int immediate)

PSRAW xmm, imm8

ShiftRightArithmetic(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_sra_epi16 (__m128i a, __m128i count)

PSRAW xmm, xmm/m128

ShiftRightArithmetic(Vector128<Int32>, Byte)

__m128i _mm_srai_epi32 (__m128i a, int immediate)

PSRAD xmm, imm8

ShiftRightArithmetic(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_sra_epi32 (__m128i a, __m128i count)

PSRAD xmm, xmm/m128

ShiftRightLogical(Vector128<Int16>, Byte)

__m128i _mm_srli_epi16 (__m128i a, int immediate)

PSRLW xmm, imm8

ShiftRightLogical(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_srl_epi16 (__m128i a, __m128i count)

PSRLW xmm, xmm/m128

ShiftRightLogical(Vector128<Int32>, Byte)

__m128i _mm_srli_epi32 (__m128i a, int immediate)

PSRLD xmm, imm8

ShiftRightLogical(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_srl_epi32 (__m128i a, __m128i count)

PSRLD xmm, xmm/m128

ShiftRightLogical(Vector128<Int64>, Byte)

__m128i _mm_srli_epi64 (__m128i a, int immediate)

PSRLQ xmm, imm8

ShiftRightLogical(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_srl_epi64 (__m128i a, __m128i count)

PSRLQ xmm, xmm/m128

ShiftRightLogical(Vector128<UInt16>, Byte)

__m128i _mm_srli_epi16 (__m128i a, int immediate)

PSRLW xmm, imm8

ShiftRightLogical(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_srl_epi16 (__m128i a, __m128i count)

PSRLW xmm, xmm/m128

ShiftRightLogical(Vector128<UInt32>, Byte)

__m128i _mm_srli_epi32 (__m128i a, int immediate)

PSRLD xmm, imm8

ShiftRightLogical(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_srl_epi32 (__m128i a, __m128i count)

PSRLD xmm, xmm/m128

ShiftRightLogical(Vector128<UInt64>, Byte)

__m128i _mm_srli_epi64 (__m128i a, int immediate)

PSRLQ xmm, imm8

ShiftRightLogical(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_srl_epi64 (__m128i a, __m128i count)

PSRLQ xmm, xmm/m128

ShiftRightLogical128BitLane(Vector128<Byte>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<Int16>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<Int32>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<Int64>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<SByte>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<UInt16>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<UInt32>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

ShiftRightLogical128BitLane(Vector128<UInt64>, Byte)

__m128i _mm_bsrli_si128 (__m128i a, int imm8)

PSRLDQ xmm, imm8

Shuffle(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate)

SHUFPD xmm, xmm/m128, imm8

Shuffle(Vector128<Int32>, Byte)

__m128i _mm_shuffle_epi32 (__m128i a, int immediate)

PSHUFD xmm, xmm/m128, imm8

Shuffle(Vector128<UInt32>, Byte)

__m128i _mm_shuffle_epi32 (__m128i a, int immediate)

PSHUFD xmm, xmm/m128, imm8

ShuffleHigh(Vector128<Int16>, Byte)

__m128i _mm_shufflehi_epi16 (__m128i a, int immediate)

PSHUFHW xmm, xmm/m128, imm8

ShuffleHigh(Vector128<UInt16>, Byte)

__m128i _mm_shufflehi_epi16 (__m128i a, int control)

PSHUFHW xmm, xmm/m128, imm8

ShuffleLow(Vector128<Int16>, Byte)

__m128i _mm_shufflelo_epi16 (__m128i a, int control)

PSHUFLW xmm, xmm/m128, imm8

ShuffleLow(Vector128<UInt16>, Byte)

__m128i _mm_shufflelo_epi16 (__m128i a, int control)

PSHUFLW xmm, xmm/m128, imm8
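
A minimal sketch with a hypothetical control value: for Shuffle on Int32, the control byte packs four 2-bit source-lane indices, so 0b00_01_10_11 reverses the four lanes.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShuffleDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<int> v = Vector128.Create(10, 20, 30, 40);

        // PSHUFD: each 2-bit field selects the source lane for the corresponding result lane.
        Vector128<int> reversed = Sse2.Shuffle(v, 0b00_01_10_11);  // -> 40, 30, 20, 10

        for (int i = 0; i < 4; i++)
            Console.Write(reversed.GetElement(i) + " ");
    }
}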

Sqrt(Vector128<Double>)

__m128d _mm_sqrt_pd (__m128d a)

SQRTPD xmm, xmm/m128

SqrtScalar(Vector128<Double>)

__m128d _mm_sqrt_sd (__m128d a)

SQRTSD xmm, xmm/m64

SqrtScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_sqrt_sd (__m128d a, __m128d b)

SQRTSD xmm, xmm/m64

Store(Byte*, Vector128<Byte>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(Double*, Vector128<Double>)

void _mm_storeu_pd (double* mem_addr, __m128d a)

MOVUPD m128, xmm

Store(Int16*, Vector128<Int16>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(Int32*, Vector128<Int32>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(Int64*, Vector128<Int64>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(SByte*, Vector128<SByte>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(UInt16*, Vector128<UInt16>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(UInt32*, Vector128<UInt32>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm

Store(UInt64*, Vector128<UInt64>)

void _mm_storeu_si128 (__m128i* mem_addr, __m128i a)

MOVDQU m128, xmm
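
A hedged sketch using unsafe code and a hypothetical buffer: Store writes 128 bits without any alignment requirement, whereas the StoreAligned and StoreAlignedNonTemporal overloads below require a 16-byte-aligned destination.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class StoreDemo
{
    static unsafe void Main()
    {
        if (!Sse2.IsSupported) return;

        int[] destination = new int[4];
        Vector128<int> v = Vector128.Create(1, 2, 3, 4);

        fixed (int* p = destination)
        {
            Sse2.Store(p, v);  // MOVDQU: unaligned 128-bit store
        }

        Console.WriteLine(string.Join(", ", destination));  // 1, 2, 3, 4
    }
}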

StoreAligned(Byte*, Vector128<Byte>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(Double*, Vector128<Double>)

void _mm_store_pd (double* mem_addr, __m128d a)

MOVAPD m128, xmm

StoreAligned(Int16*, Vector128<Int16>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(Int32*, Vector128<Int32>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(Int64*, Vector128<Int64>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(SByte*, Vector128<SByte>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(UInt16*, Vector128<UInt16>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(UInt32*, Vector128<UInt32>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAligned(UInt64*, Vector128<UInt64>)

void _mm_store_si128 (__m128i* mem_addr, __m128i a)

MOVDQA m128, xmm

StoreAlignedNonTemporal(Byte*, Vector128<Byte>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(Double*, Vector128<Double>)

void _mm_stream_pd (double* mem_addr, __m128d a)

MOVNTPD m128, xmm

StoreAlignedNonTemporal(Int16*, Vector128<Int16>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(Int32*, Vector128<Int32>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(Int64*, Vector128<Int64>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(SByte*, Vector128<SByte>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(UInt16*, Vector128<UInt16>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(UInt32*, Vector128<UInt32>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreAlignedNonTemporal(UInt64*, Vector128<UInt64>)

void _mm_stream_si128 (__m128i* mem_addr, __m128i a)

MOVNTDQ m128, xmm

StoreHigh(Double*, Vector128<Double>)

void _mm_storeh_pd (double* mem_addr, __m128d a)

MOVHPD m64, xmm

StoreLow(Double*, Vector128<Double>)

void _mm_storel_pd (double* mem_addr, __m128d a)

MOVLPD m64, xmm

StoreNonTemporal(Int32*, Int32)

void _mm_stream_si32(int *p, int a)

MOVNTI m32, r32

StoreNonTemporal(UInt32*, UInt32)

void _mm_stream_si32(int *p, int a)

MOVNTI m32, r32

StoreScalar(Double*, Vector128<Double>)

void _mm_store_sd (double* mem_addr, __m128d a)

MOVSD m64, xmm

StoreScalar(Int32*, Vector128<Int32>)

void _mm_storeu_si32 (void* mem_addr, __m128i a)

MOVD m32, xmm

StoreScalar(Int64*, Vector128<Int64>)

void _mm_storel_epi64 (__m128i* mem_addr, __m128i a)

MOVQ m64, xmm

StoreScalar(UInt32*, Vector128<UInt32>)

void _mm_storeu_si32 (void* mem_addr, __m128i a)

MOVD m32, xmm

StoreScalar(UInt64*, Vector128<UInt64>)

void _mm_storel_epi64 (__m128i* mem_addr, __m128i a)

MOVQ m64, xmm

Subtract(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_sub_epi8 (__m128i a, __m128i b)

PSUBB xmm, xmm/m128

Subtract(Vector128<Double>, Vector128<Double>)

__m128d _mm_sub_pd (__m128d a, __m128d b)

SUBPD xmm, xmm/m128

Subtract(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_sub_epi16 (__m128i a, __m128i b)

PSUBW xmm, xmm/m128

Subtract(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_sub_epi32 (__m128i a, __m128i b)

PSUBD xmm, xmm/m128

Subtract(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_sub_epi64 (__m128i a, __m128i b)

PSUBQ xmm, xmm/m128

Subtract(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_sub_epi8 (__m128i a, __m128i b)

PSUBB xmm, xmm/m128

Subtract(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_sub_epi16 (__m128i a, __m128i b)

PSUBW xmm, xmm/m128

Subtract(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_sub_epi32 (__m128i a, __m128i b)

PSUBD xmm, xmm/m128

Subtract(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_sub_epi64 (__m128i a, __m128i b)

PSUBQ xmm, xmm/m128

SubtractSaturate(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_subs_epu8 (__m128i a, __m128i b)

PSUBUSB xmm, xmm/m128

SubtractSaturate(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_subs_epi16 (__m128i a, __m128i b)

PSUBSW xmm, xmm/m128

SubtractSaturate(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_subs_epi8 (__m128i a, __m128i b)

PSUBSB xmm, xmm/m128

SubtractSaturate(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_subs_epu16 (__m128i a, __m128i b)

PSUBUSW xmm, xmm/m128

SubtractScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_sub_sd (__m128d a, __m128d b)

SUBSD xmm, xmm/m64

SumAbsoluteDifferences(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_sad_epu8 (__m128i a, __m128i b)

PSADBW xmm, xmm/m128
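
For illustration, a sketch with made-up data (assumes SSE2 support): SumAbsoluteDifferences accumulates |a[i] - b[i]| over each group of eight bytes, leaving the two 16-bit partial sums in lanes 0 and 4 of the UInt16 result.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class SadDemo
{
    static void Main()
    {
        if (!Sse2.IsSupported) return;

        Vector128<byte> a = Vector128.Create((byte)10);
        Vector128<byte> b = Vector128.Create((byte)3);

        // PSADBW: each half sums eight absolute differences of 7 -> 56.
        Vector128<ushort> sums = Sse2.SumAbsoluteDifferences(a, b);

        Console.WriteLine(sums.GetElement(0));  // 56 (bytes 0..7)
        Console.WriteLine(sums.GetElement(4));  // 56 (bytes 8..15)
    }
}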

ToString()

Returns a string that represents the current object.

(Inherited from Object)
UnpackHigh(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_unpackhi_epi8 (__m128i a, __m128i b)

PUNPCKHBW xmm, xmm/m128

UnpackHigh(Vector128<Double>, Vector128<Double>)

__m128d _mm_unpackhi_pd (__m128d a, __m128d b)

UNPCKHPD xmm, xmm/m128

UnpackHigh(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_unpackhi_epi16 (__m128i a, __m128i b)

PUNPCKHWD xmm, xmm/m128

UnpackHigh(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_unpackhi_epi32 (__m128i a, __m128i b)

PUNPCKHDQ xmm, xmm/m128

UnpackHigh(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_unpackhi_epi64 (__m128i a, __m128i b)

PUNPCKHQDQ xmm, xmm/m128

UnpackHigh(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_unpackhi_epi8 (__m128i a, __m128i b)

PUNPCKHBW xmm, xmm/m128

UnpackHigh(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_unpackhi_epi16 (__m128i a, __m128i b)

PUNPCKHWD xmm, xmm/m128

UnpackHigh(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_unpackhi_epi32 (__m128i a, __m128i b)

PUNPCKHDQ xmm, xmm/m128

UnpackHigh(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_unpackhi_epi64 (__m128i a, __m128i b)

PUNPCKHQDQ xmm, xmm/m128

UnpackLow(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_unpacklo_epi8 (__m128i a, __m128i b)

PUNPCKLBW xmm, xmm/m128

UnpackLow(Vector128<Double>, Vector128<Double>)

__m128d _mm_unpacklo_pd (__m128d a, __m128d b)

UNPCKLPD xmm, xmm/m128

UnpackLow(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_unpacklo_epi16 (__m128i a, __m128i b)

PUNPCKLWD xmm, xmm/m128

UnpackLow(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_unpacklo_epi32 (__m128i a, __m128i b)

PUNPCKLDQ xmm, xmm/m128

UnpackLow(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_unpacklo_epi64 (__m128i a, __m128i b)

PUNPCKLQDQ xmm, xmm/m128

UnpackLow(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_unpacklo_epi8 (__m128i a, __m128i b)

PUNPCKLBW xmm, xmm/m128

UnpackLow(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_unpacklo_epi16 (__m128i a, __m128i b)

PUNPCKLWD xmm, xmm/m128

UnpackLow(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_unpacklo_epi32 (__m128i a, __m128i b)

PUNPCKLDQ xmm, xmm/m128

UnpackLow(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_unpacklo_epi64 (__m128i a, __m128i b)

PUNPCKLQDQ xmm, xmm/m128

Xor(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<Double>, Vector128<Double>)

__m128d _mm_xor_pd (__m128d a, __m128d b)

XORPD xmm, xmm/m128

Xor(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Xor(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_xor_si128 (__m128i a, __m128i b)

PXOR xmm, xmm/m128

Applies to