Avx10v1 Class
Important: Some information relates to prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.

Important: This API is not CLS-compliant.
Provides access to X86 AVX10.1 hardware instructions via intrinsics.

C++/CLI:
public ref class Avx10v1 abstract : System::Runtime::Intrinsics::X86::Avx2

C#:
[System.CLSCompliant(false)]
public abstract class Avx10v1 : System.Runtime.Intrinsics.X86.Avx2

F#:
[<System.CLSCompliant(false)>]
type Avx10v1 = class
    inherit Avx2

VB:
Public MustInherit Class Avx10v1
Inherits Avx2
- Inheritance: Avx2 → Avx10v1
- Attributes: CLSCompliantAttribute
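The members listed below are exposed as static methods. A minimal usage sketch follows; it assumes the usual guard-with-IsSupported pattern, and the sample values and console output are illustrative only (IsSupported, Abs, and Max are the members of this class used here):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class Avx10v1Example
{
    static void Main()
    {
        // Guard every call: the JIT only emits AVX10.1 instructions when
        // the current CPU reports support for them.
        if (!Avx10v1.IsSupported)
        {
            Console.WriteLine("AVX10.1 is not supported on this CPU.");
            return;
        }

        // Abs(Vector128<Int64>) maps to VPABSQ; Max(Vector128<Int64>, Vector128<Int64>) maps to VPMAXSQ.
        Vector128<long> v = Vector128.Create(-3L, 7L);
        Vector128<long> abs = Avx10v1.Abs(v);
        Vector128<long> max = Avx10v1.Max(v, abs);

        Console.WriteLine($"abs = {abs}");
        Console.WriteLine($"max = {max}");
    }
}
```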
Properties

IsSupported |
Gets a value that indicates whether the APIs in this class are supported. |

Methods
Abs(Vector128<Int64>) |
__m128i _mm_abs_epi64 (__m128i a) VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Abs(Vector256<Int64>) |
__m256i _mm256_abs_epi64 (__m256i a) VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Add |
__m128d _mm_add_round_sd (__m128d a, __m128d b, int rounding) VADDSD xmm1, xmm2, xmm3 {er} |
Add |
__m128 _mm_add_round_ss (__m128 a, __m128 b, int rounding) VADDSS xmm1, xmm2, xmm3 {er} |
Align |
__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count) VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Align |
__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count) VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Align |
__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count) VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Align |
__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count) VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Align |
__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count) VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Align |
__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count) VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Align |
__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count) VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Align |
__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count) VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Broadcast |
__m128i _mm_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64 |
Broadcast |
__m128i _mm_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64 |
Broadcast |
__m256i _mm256_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64 |
Broadcast |
__m256 _mm256_broadcast_f32x2 (__m128 a) VBROADCASTF32x2 ymm1 {k1}{z}, xmm2/m64 |
Broadcast |
__m256i _mm256_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64 |
Compare |
__m128i _mm_cmpgt_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(6) |
Compare |
__m128i _mm_cmpgt_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(6) |
Compare |
__m128i _mm_cmpgt_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6) |
Compare |
__m128i _mm_cmpgt_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6) |
Compare |
__m256i _mm256_cmpgt_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(6) |
Compare |
__m256i _mm256_cmpgt_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(6) |
Compare |
__m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6) |
Compare |
__m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6) |
Compare |
__m128i _mm_cmpge_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(5) |
Compare |
__m128i _mm_cmpge_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(5) |
Compare |
__m128i _mm_cmpge_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5) |
Compare |
__m128i _mm_cmpge_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5) |
Compare |
__m128i _mm_cmpge_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(5) |
Compare |
__m128i _mm_cmpge_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(5) |
Compare |
__m128i _mm_cmpge_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5) |
Compare |
__m128i _mm_cmpge_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5) |
Compare |
__m256i _mm256_cmpge_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(5) |
Compare |
__m256i _mm256_cmpge_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(5) |
Compare |
__m256i _mm256_cmpge_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5) |
Compare |
__m256i _mm256_cmpge_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5) |
Compare |
__m256i _mm256_cmpge_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(5) |
Compare |
__m256i _mm256_cmpge_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(5) |
Compare |
__m256i _mm256_cmpge_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5) |
Compare |
__m256i _mm256_cmpge_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5) |
Compare |
__m128i _mm_cmplt_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(1) |
Compare |
__m128i _mm_cmplt_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(1) |
Compare |
__m128i _mm_cmplt_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1) |
Compare |
__m128i _mm_cmplt_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1) |
Compare |
__m128i _mm_cmplt_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(1) |
Compare |
__m128i _mm_cmplt_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(1) |
Compare |
__m128i _mm_cmplt_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1) |
Compare |
__m128i _mm_cmplt_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1) |
Compare |
__m256i _mm256_cmplt_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(1) |
Compare |
__m256i _mm256_cmplt_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(1) |
Compare |
__m256i _mm256_cmplt_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1) |
Compare |
__m256i _mm256_cmplt_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1) |
Compare |
__m256i _mm256_cmplt_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(1) |
Compare |
__m256i _mm256_cmplt_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(1) |
Compare |
__m256i _mm256_cmplt_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1) |
Compare |
__m256i _mm256_cmplt_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1) |
Compare |
__m128i _mm_cmple_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(2) |
Compare |
__m128i _mm_cmple_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(2) |
Compare |
__m128i _mm_cmple_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2) |
Compare |
__m128i _mm_cmple_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2) |
Compare |
__m128i _mm_cmple_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(2) |
Compare |
__m128i _mm_cmple_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(2) |
Compare |
__m128i _mm_cmple_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2) |
Compare |
__m128i _mm_cmple_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2) |
Compare |
__m256i _mm256_cmple_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(2) |
Compare |
__m256i _mm256_cmple_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(2) |
Compare |
__m256i _mm256_cmple_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2) |
Compare |
__m256i _mm256_cmple_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2) |
Compare |
__m256i _mm256_cmple_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(2) |
Compare |
__m256i _mm256_cmple_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(2) |
Compare |
__m256i _mm256_cmple_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2) |
Compare |
__m256i _mm256_cmple_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2) |
Compare |
__m128i _mm_cmpne_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(4) |
Compare |
__m128i _mm_cmpne_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(4) |
Compare |
__m128i _mm_cmpne_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4) |
Compare |
__m128i _mm_cmpne_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4) |
Compare |
__m128i _mm_cmpne_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(4) |
Compare |
__m128i _mm_cmpne_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(4) |
Compare |
__m128i _mm_cmpne_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4) |
Compare |
__m128i _mm_cmpne_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4) |
Compare |
__m256i _mm256_cmpne_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(4) |
Compare |
__m256i _mm256_cmpne_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(4) |
Compare |
__m256i _mm256_cmpne_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4) |
Compare |
__m256i _mm256_cmpne_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4) |
Compare |
__m256i _mm256_cmpne_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(4) |
Compare |
__m256i _mm256_cmpne_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(4) |
Compare |
__m256i _mm256_cmpne_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4) |
Compare |
__m256i _mm256_cmpne_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4) |
Convert |
__m128d _mm_cvtu32_sd (__m128d a, unsigned int b) VCVTUSI2SD xmm1, xmm2, r/m32 |
Convert |
__m128 _mm_cvt_roundi32_ss (__m128 a, int b, int rounding) VCVTSI2SS xmm1, xmm2, r32 {er} |
Convert |
__m128 _mm_cvt_roundu32_ss (__m128 a, unsigned int b, int rounding) VCVTUSI2SS xmm1, xmm2, r32 {er} |
Convert |
__m128 _mm_cvtu32_ss (__m128 a, unsigned int b) VCVTUSI2SS xmm1, xmm2, r/m32 |
Convert |
__m128 _mm_cvt_roundsd_ss (__m128 a, __m128d b, int rounding) VCVTSD2SS xmm1, xmm2, xmm3 {er} |
Convert |
int _mm_cvt_roundsd_i32 (__m128d a, int rounding) VCVTSD2SI r32, xmm1 {er} |
Convert |
int _mm_cvt_roundss_i32 (__m128 a, int rounding) VCVTSS2SI r32, xmm1 {er} |
Convert |
unsigned int _mm_cvt_roundsd_u32 (__m128d a, int rounding) VCVTSD2USI r32, xmm1 {er} |
Convert |
unsigned int _mm_cvtsd_u32 (__m128d a) VCVTSD2USI r32, xmm1/m64{er} |
Convert |
unsigned int _mm_cvt_roundss_u32 (__m128 a, int rounding) VCVTSS2USI r32, xmm1 {er} |
Convert |
unsigned int _mm_cvtss_u32 (__m128 a) VCVTSS2USI r32, xmm1/m32{er} |
Convert |
unsigned int _mm_cvttsd_u32 (__m128d a) VCVTTSD2USI r32, xmm1/m64{er} |
Convert |
unsigned int _mm_cvttss_u32 (__m128 a) VCVTTSS2USI r32, xmm1/m32{er} |
Convert |
__m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtusepi16_epi8 (__m128i a) VPMOVUSWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtusepi32_epi8 (__m128i a) VPMOVUSDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtusepi64_epi8 (__m128i a) VPMOVUSQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtusepi16_epi8 (__m256i a) VPMOVUSWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtusepi32_epi8 (__m256i a) VPMOVUSDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtusepi64_epi8 (__m256i a) VPMOVUSQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128d _mm_cvtepi64_pd (__m128i a) VCVTQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128d _mm_cvtepu32_pd (__m128i a) VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst |
Convert |
__m128d _mm_cvtepu64_pd (__m128i a) VCVTUQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtsepi32_epi16 (__m128i a) VPMOVSDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtsepi64_epi16 (__m128i a) VPMOVSQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtsepi32_epi16 (__m256i a) VPMOVSDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtsepi64_epi16 (__m256i a) VPMOVSQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtsepi64_epi32 (__m128i a) VPMOVSQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtsepi64_epi32 (__m256i a) VPMOVSQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtpd_epi64 (__m128d a) VCVTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvtps_epi64 (__m128 a) VCVTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst |
Convert |
__m128i _mm_cvttpd_epi64 (__m128d a) VCVTTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvttps_epi64 (__m128 a) VCVTTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst |
Convert |
__m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtsepi16_epi8 (__m128i a) VPMOVSWB xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtsepi32_epi8 (__m128i a) VPMOVSDB xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtsepi64_epi8 (__m128i a) VPMOVSQB xmm1/m16 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtsepi16_epi8 (__m256i a) VPMOVSWB xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtsepi32_epi8 (__m256i a) VPMOVSDB xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtsepi64_epi8 (__m256i a) VPMOVSQB xmm1/m32 {k1}{z}, ymm2 |
Convert |
__m128 _mm_cvtepi64_ps (__m128i a) VCVTQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128 _mm_cvtepu32_ps (__m128i a) VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m128 _mm_cvtepu64_ps (__m128i a) VCVTUQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128 _mm256_cvtepi64_ps (__m256i a) VCVTQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m128 _mm256_cvtepu64_ps (__m256i a) VCVTUQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtusepi32_epi16 (__m128i a) VPMOVUSDW xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtusepi64_epi16 (__m128i a) VPMOVUSQW xmm1/m32 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtusepi32_epi16 (__m256i a) VPMOVUSDW xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtusepi64_epi16 (__m256i a) VPMOVUSQW xmm1/m64 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtpd_epu32 (__m128d a) VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm_cvtps_epu32 (__m128 a) VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtpd_epu32 (__m256d a) VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvtusepi64_epi32 (__m128i a) VPMOVUSQD xmm1/m64 {k1}{z}, xmm2 |
Convert |
__m128i _mm256_cvtusepi64_epi32 (__m256i a) VPMOVUSQD xmm1/m128 {k1}{z}, ymm2 |
Convert |
__m128i _mm_cvttpd_epu32 (__m128d a) VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvttps_epu32 (__m128 a) VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m128i _mm256_cvttpd_epu32 (__m256d a) VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m128i _mm_cvtpd_epu64 (__m128d a) VCVTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvtps_epu64 (__m128 a) VCVTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst |
Convert |
__m128i _mm_cvttpd_epu64 (__m128d a) VCVTTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Convert |
__m128i _mm_cvttps_epu64 (__m128 a) VCVTTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst |
Convert |
__m256d _mm256_cvtepu32_pd (__m128i a) VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m256d _mm256_cvtepi64_pd (__m256i a) VCVTQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m256d _mm256_cvtepu64_pd (__m256i a) VCVTUQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m256i _mm256_cvtps_epi64 (__m128 a) VCVTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m256i _mm256_cvtpd_epi64 (__m256d a) VCVTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m256i _mm256_cvttps_epi64 (__m128 a) VCVTTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m256i _mm256_cvttpd_epi64 (__m256d a) VCVTTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m256 _mm256_cvtepu32_ps (__m256i a) VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst |
Convert |
__m256i _mm256_cvtps_epu32 (__m256 a) VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst |
Convert |
__m256i _mm256_cvttps_epu32 (__m256 a) VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst |
Convert |
__m256i _mm256_cvtps_epu64 (__m128 a) VCVTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m256i _mm256_cvtpd_epu64 (__m256d a) VCVTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Convert |
__m256i _mm256_cvttps_epu64 (__m128 a) VCVTTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst |
Convert |
__m256i _mm256_cvttpd_epu64 (__m256d a) VCVTTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Detect |
__m128i _mm_conflict_epi32 (__m128i a) VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst |
Detect |
__m128i _mm_conflict_epi64 (__m128i a) VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Detect |
__m128i _mm_conflict_epi32 (__m128i a) VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst |
Detect |
__m128i _mm_conflict_epi64 (__m128i a) VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Detect |
__m256i _mm256_conflict_epi32 (__m256i a) VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst |
Detect |
__m256i _mm256_conflict_epi64 (__m256i a) VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Detect |
__m256i _mm256_conflict_epi32 (__m256i a) VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst |
Detect |
__m256i _mm256_conflict_epi64 (__m256i a) VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Divide |
__m128d _mm_div_round_sd (__m128d a, __m128d b, int rounding) VDIVSD xmm1, xmm2, xmm3 {er} |
Divide |
__m128 _mm_div_round_ss (__m128 a, __m128 b, int rounding) VDIVSS xmm1, xmm2, xmm3 {er} |
Equals(Object) |
Determines whether the specified object is equal to the current object. (Inherited from Object) |
Fixup(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte) |
__m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Fixup(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte) |
__m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Fixup(Vector256<Double>, Vector256<Double>, Vector256<Int64>, Byte) |
__m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm); VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Fixup(Vector256<Single>, Vector256<Single>, Vector256<Int32>, Byte) |
__m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm); VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Fixup |
__m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 |
Fixup |
__m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 |
Fused |
__m128d _mm_fnmadd_round_sd (__m128d a, __m128d b, __m128d c, int r) VFNMADDSD xmm1, xmm2, xmm3 {er} |
Fused |
__m128 _mm_fnmadd_round_ss (__m128 a, __m128 b, __m128 c, int r) VFNMADDSS xmm1, xmm2, xmm3 {er} |
Fused |
__m128d _mm_fmadd_round_sd (__m128d a, __m128d b, __m128d c, int r) VFMADDSD xmm1, xmm2, xmm3 {er} |
Fused |
__m128 _mm_fmadd_round_ss (__m128 a, __m128 b, __m128 c, int r) VFMADDSS xmm1, xmm2, xmm3 {er} |
Fused |
__m128d _mm_fnmsub_round_sd (__m128d a, __m128d b, __m128d c, int r) VFNMSUBSD xmm1, xmm2, xmm3 {er} |
Fused |
__m128 _mm_fnmsub_round_ss (__m128 a, __m128 b, __m128 c, int r) VFNMSUBSS xmm1, xmm2, xmm3 {er} |
Fused |
__m128d _mm_fmsub_round_sd (__m128d a, __m128d b, __m128d c, int r) VFMSUBSD xmm1, xmm2, xmm3 {er} |
Fused |
__m128 _mm_fmsub_round_ss (__m128 a, __m128 b, __m128 c, int r) VFMSUBSS xmm1, xmm2, xmm3 {er} |
Get |
__m128d _mm_getexp_pd (__m128d a) VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Get |
__m128 _mm_getexp_ps (__m128 a) VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst |
Get |
__m256d _mm256_getexp_pd (__m256d a) VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Get |
__m256 _mm256_getexp_ps (__m256 a) VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst |
Get |
__m128d _mm_getexp_sd (__m128d a, __m128d b) VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Get |
__m128d _mm_getexp_sd (__m128d a) VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} |
Get |
__m128 _mm_getexp_ss (__m128 a, __m128 b) VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Get |
__m128 _mm_getexp_ss (__m128 a) VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} |
GetHashCode() |
Serves as the default hash function. (Inherited from Object) |
Get |
__m128d _mm_getmant_pd (__m128d a) VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Get |
__m128 _mm_getmant_ps (__m128 a) VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst |
Get |
__m256d _mm256_getmant_pd (__m256d a) VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Get |
__m256 _mm256_getmant_ps (__m256 a) VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst |
Get |
__m128d _mm_getmant_sd (__m128d a) VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} |
Get |
__m128d _mm_getmant_sd (__m128d a, __m128d b) VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Get |
__m128 _mm_getmant_ss (__m128 a) VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} |
Get |
__m128 _mm_getmant_ss (__m128 a, __m128 b) VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
GetType() |
Gets the Type of the current instance. (Inherited from Object) |
Leading |
__m128i _mm_lzcnt_epi32 (__m128i a) VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst |
Leading |
__m128i _mm_lzcnt_epi64 (__m128i a) VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Leading |
__m128i _mm_lzcnt_epi32 (__m128i a) VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst |
Leading |
__m128i _mm_lzcnt_epi64 (__m128i a) VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst |
Leading |
__m256i _mm256_lzcnt_epi32 (__m256i a) VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst |
Leading |
__m256i _mm256_lzcnt_epi64 (__m256i a) VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Leading |
__m256i _mm256_lzcnt_epi32 (__m256i a) VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst |
Leading |
__m256i _mm256_lzcnt_epi64 (__m256i a) VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst |
Max(Vector128<Int64>, Vector128<Int64>) |
__m128i _mm_max_epi64 (__m128i a, __m128i b) VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Max(Vector128<UInt64>, Vector128<UInt64>) |
__m128i _mm_max_epu64 (__m128i a, __m128i b) VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Max(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_max_epi64 (__m256i a, __m256i b) VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Max(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_max_epu64 (__m256i a, __m256i b) VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
MemberwiseClone() |
Creates a shallow copy of the current Object. (Inherited from Object) |
Min(Vector128<Int64>, Vector128<Int64>) |
__m128i _mm_min_epi64 (__m128i a, __m128i b) VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Min(Vector128<UInt64>, Vector128<UInt64>) |
__m128i _mm_min_epu64 (__m128i a, __m128i b) VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Min(Vector256<Int64>, Vector256<Int64>) |
__m256i _mm256_min_epi64 (__m256i a, __m256i b) VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Min(Vector256<UInt64>, Vector256<UInt64>) |
__m256i _mm256_min_epu64 (__m256i a, __m256i b) VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Multiply |
__m128i _mm_mullo_epi64 (__m128i a, __m128i b) VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Multiply |
__m128i _mm_mullo_epi64 (__m128i a, __m128i b) VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Multiply |
__m256i _mm256_mullo_epi64 (__m256i a, __m256i b) VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Multiply |
__m256i _mm256_mullo_epi64 (__m256i a, __m256i b) VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Multiply |
__m128d _mm_mul_round_sd (__m128d a, __m128d b, int rounding) VMULSD xmm1, xmm2, xmm3 {er} |
Multiply |
__m128 _mm_mul_round_ss (__m128 a, __m128 b, int rounding) VMULSS xmm1, xmm2, xmm3 {er} |
Multi |
__m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b) VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Multi |
__m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b) VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Multi |
__m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b) VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Multi |
__m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b) VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b) VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b) VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b) VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b) VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b) VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b) VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b) VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b) VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128d b) VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Permute |
__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b) VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Permute |
__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b) VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Permute |
__m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b) VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b) VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b) VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b) VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256 |
Permute |
__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b) VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Permute |
__m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128 b) VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Permute |
__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b) VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Permute |
__m256d _mm256_permute4x64_pd (__m256d a, __m256i b) VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b) VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b) VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256d b) VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b) VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b) VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Permute |
__m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b) VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b) VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b) VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b) VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128 |
Permute |
__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b) VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Permute |
__m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256 b) VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Permute |
__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b) VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Range(Vector128<Double>, Vector128<Double>, Byte) |
__m128d _mm_range_pd(__m128d a, __m128d b, int imm); VRANGEPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Range(Vector128<Single>, Vector128<Single>, Byte) |
__m128 _mm_range_ps(__m128 a, __m128 b, int imm); VRANGEPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Range(Vector256<Double>, Vector256<Double>, Byte) |
__m256d _mm256_range_pd(__m256d a, __m256d b, int imm); VRANGEPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Range(Vector256<Single>, Vector256<Single>, Byte) |
__m256 _mm256_range_ps(__m256 a, __m256 b, int imm); VRANGEPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Range |
__m128d _mm_range_sd(__m128d a, __m128d b, int imm); VRANGESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 |
Range |
__m128 _mm_range_ss(__m128 a, __m128 b, int imm); VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 |
Reciprocal14(Vector128<Double>) |
__m128d _mm_rcp14_pd (__m128d a) VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Reciprocal14(Vector128<Single>) |
__m128 _mm_rcp14_ps (__m128 a) VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst |
Reciprocal14(Vector256<Double>) |
__m256d _mm256_rcp14_pd (__m256d a) VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Reciprocal14(Vector256<Single>) |
__m256 _mm256_rcp14_ps (__m256 a) VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst |
Reciprocal14Scalar(Vector128<Double>, Vector128<Double>) |
__m128d _mm_rcp14_sd (__m128d a, __m128d b) VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Reciprocal14Scalar(Vector128<Double>) |
__m128d _mm_rcp14_sd (__m128d a) VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64 |
Reciprocal14Scalar(Vector128<Single>, Vector128<Single>) |
__m128 _mm_rcp14_ss (__m128 a, __m128 b) VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Reciprocal14Scalar(Vector128<Single>) |
__m128 _mm_rcp14_ss (__m128 a) VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32 |
Reciprocal |
__m128d _mm_rsqrt14_pd (__m128d a) VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst |
Reciprocal |
__m128 _mm_rsqrt14_ps (__m128 a) VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst |
Reciprocal |
__m256d _mm256_rsqrt14_pd (__m256d a) VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst |
Reciprocal |
__m256 _mm256_rsqrt14_ps (__m256 a) VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst |
Reciprocal |
__m128d _mm_rsqrt14_sd (__m128d a, __m128d b) VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Reciprocal |
__m128d _mm_rsqrt14_sd (__m128d a) VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64 |
Reciprocal |
__m128 _mm_rsqrt14_ss (__m128 a, __m128 b) VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Reciprocal |
__m128 _mm_rsqrt14_ss (__m128 a) VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32 |
Reduce(Vector128<Double>, Byte) |
__m128d _mm_reduce_pd(__m128d a, int imm); VREDUCEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Reduce(Vector128<Single>, Byte) |
__m128 _mm_reduce_ps(__m128 a, int imm); VREDUCEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Reduce(Vector256<Double>, Byte) |
__m256d _mm256_reduce_pd(__m256d a, int imm); VREDUCEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Reduce(Vector256<Single>, Byte) |
__m256 _mm256_reduce_ps(__m256 a, int imm); VREDUCEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Reduce |
__m128d _mm_reduce_sd(__m128d a, int imm); VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 |
Reduce |
__m128d _mm_reduce_sd(__m128d a, __m128d b, int imm); VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Reduce |
__m128 _mm_reduce_ss(__m128 a, int imm); VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 |
Reduce |
__m128 _mm_reduce_ss(__m128 a, __m128 b, int imm); VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Rotate |
__m128i _mm_rol_epi32 (__m128i a, int imm8) VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Rotate |
__m128i _mm_rol_epi64 (__m128i a, int imm8) VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Rotate |
__m128i _mm_rol_epi32 (__m128i a, int imm8) VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Rotate |
__m128i _mm_rol_epi64 (__m128i a, int imm8) VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Rotate |
__m256i _mm256_rol_epi32 (__m256i a, int imm8) VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Rotate |
__m256i _mm256_rol_epi64 (__m256i a, int imm8) VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Rotate |
__m256i _mm256_rol_epi32 (__m256i a, int imm8) VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Rotate |
__m256i _mm256_rol_epi64 (__m256i a, int imm8) VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Rotate |
__m128i _mm_rolv_epi32 (__m128i a, __m128i b) VPROLVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Rotate |
__m128i _mm_rolv_epi64 (__m128i a, __m128i b) VPROLVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Rotate |
__m128i _mm_rolv_epi32 (__m128i a, __m128i b) VPROLVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Rotate |
__m128i _mm_rolv_epi64 (__m128i a, __m128i b) VPROLVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Rotate |
__m256i _mm256_rolv_epi32 (__m256i a, __m256i b) VPROLVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Rotate |
__m256i _mm256_rolv_epi64 (__m256i a, __m256i b) VPROLVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Rotate |
__m256i _mm256_rolv_epi32 (__m256i a, __m256i b) VPROLVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Rotate |
__m256i _mm256_rolv_epi64 (__m256i a, __m256i b) VPROLVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Rotate |
__m128i _mm_ror_epi32 (__m128i a, int imm8) VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Rotate |
__m128i _mm_ror_epi64 (__m128i a, int imm8) VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Rotate |
__m128i _mm_ror_epi32 (__m128i a, int imm8) VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Rotate |
__m128i _mm_ror_epi64 (__m128i a, int imm8) VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Rotate |
__m256i _mm256_ror_epi32 (__m256i a, int imm8) VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Rotate |
__m256i _mm256_ror_epi64 (__m256i a, int imm8) VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Rotate |
__m256i _mm256_ror_epi32 (__m256i a, int imm8) VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Rotate |
__m256i _mm256_ror_epi64 (__m256i a, int imm8) VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Rotate |
__m128i _mm_rorv_epi32 (__m128i a, __m128i b) VPRORVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Rotate |
__m128i _mm_rorv_epi64 (__m128i a, __m128i b) VPRORVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Rotate |
__m128i _mm_rorv_epi32 (__m128i a, __m128i b) VPRORVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Rotate |
__m128i _mm_rorv_epi64 (__m128i a, __m128i b) VPRORVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Rotate |
__m256i _mm256_rorv_epi32 (__m256i a, __m256i b) VPRORVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Rotate |
__m256i _mm256_rorv_epi64 (__m256i a, __m256i b) VPRORVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Rotate |
__m256i _mm256_rorv_epi32 (__m256i a, __m256i b) VPRORVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Rotate |
__m256i _mm256_rorv_epi64 (__m256i a, __m256i b) VPRORVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Round |
__m128d _mm_roundscale_pd (__m128d a, int imm) VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8 |
Round |
__m128 _mm_roundscale_ps (__m128 a, int imm) VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8 |
Round |
__m256d _mm256_roundscale_pd (__m256d a, int imm) VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8 |
Round |
__m256 _mm256_roundscale_ps (__m256 a, int imm) VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8 |
Round |
__m128d _mm_roundscale_sd (__m128d a, int imm) VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 |
Round |
__m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm) VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Round |
__m128 _mm_roundscale_ss (__m128 a, int imm) VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 |
Round |
__m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm) VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Scale(Vector128<Double>, Vector128<Double>) |
__m128d _mm_scalef_pd (__m128d a, __m128d b) VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Scale(Vector128<Single>, Vector128<Single>) |
__m128 _mm_scalef_ps (__m128 a, __m128 b) VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst |
Scale(Vector256<Double>, Vector256<Double>) |
__m256d _mm256_scalef_pd (__m256d a, __m256d b) VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Scale(Vector256<Single>, Vector256<Single>) |
__m256 _mm256_scalef_ps (__m256 a, __m256 b) VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst |
Scale |
__m128d _mm_scalef_round_sd (__m128d a, __m128d b, int rounding) VSCALEFSD xmm1, xmm2, xmm3 {er} |
Scale |
__m128d _mm_scalef_sd (__m128d a, __m128d b) VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er} |
Scale |
__m128 _mm_scalef_round_ss (__m128 a, __m128 b, int rounding) VSCALEFSS xmm1, xmm2, xmm3 {er} |
Scale |
__m128 _mm_scalef_ss (__m128 a, __m128 b) VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er} |
Shift |
__m128i _mm_sllv_epi16 (__m128i a, __m128i count) VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m128i _mm_sllv_epi16 (__m128i a, __m128i count) VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m256i _mm256_sllv_epi16 (__m256i a, __m256i count) VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Shift |
__m256i _mm256_sllv_epi16 (__m256i a, __m256i count) VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Shift |
__m128i _mm_srai_epi64 (__m128i a, int imm8) VPSRAQ xmm1 {k1}{z}, xmm2, imm8 |
Shift |
__m128i _mm_sra_epi64 (__m128i a, __m128i count) VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m256i _mm256_srai_epi64 (__m256i a, int imm8) VPSRAQ ymm1 {k1}{z}, ymm2, imm8 |
Shift |
__m256i _mm256_sra_epi64 (__m256i a, __m128i count) VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128 |
Shift |
__m128i _mm_srav_epi16 (__m128i a, __m128i count) VPSRAVW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m128i _mm_srav_epi64 (__m128i a, __m128i count) VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst |
Shift |
__m256i _mm256_srav_epi16 (__m256i a, __m256i count) VPSRAVW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Shift |
__m256i _mm256_srav_epi64 (__m256i a, __m256i count) VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst |
Shift |
__m128i _mm_srlv_epi16 (__m128i a, __m128i count) VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m128i _mm_srlv_epi16 (__m128i a, __m128i count) VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128 |
Shift |
__m256i _mm256_srlv_epi16 (__m256i a, __m256i count) VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Shift |
__m256i _mm256_srlv_epi16 (__m256i a, __m256i count) VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256 |
Shuffle2x128(Vector256<Double>, Vector256<Double>, Byte) |
__m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8) VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Shuffle2x128(Vector256<Int32>, Vector256<Int32>, Byte) |
__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8) VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Shuffle2x128(Vector256<Int64>, Vector256<Int64>, Byte) |
__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8) VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Shuffle2x128(Vector256<Single>, Vector256<Single>, Byte) |
__m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8) VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Shuffle2x128(Vector256<UInt32>, Vector256<UInt32>, Byte) |
__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8) VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Shuffle2x128(Vector256<UInt64>, Vector256<UInt64>, Byte) |
__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8) VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Sqrt |
__m128d _mm_sqrt_round_sd (__m128d a, __m128d b, int rounding) VSQRTSD xmm1, xmm2, xmm3 {er} |
Sqrt |
__m128 _mm_sqrt_round_ss (__m128 a, __m128 b, int rounding) VSQRTSS xmm1, xmm2, xmm3 {er} |
Subtract |
__m128d _mm_sub_round_sd (__m128d a, __m128d b, int rounding) VSUBSD xmm1, xmm2, xmm3 {er} |
Subtract |
__m128 _mm_sub_round_ss (__m128 a, __m128 b, int rounding) VSUBSS xmm1, xmm2, xmm3 {er} |
Sum |
__m128i _mm_dbsad_epu8 (__m128i a, __m128i b, int imm8) VDBPSADBW xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 |
Sum |
__m256i _mm256_dbsad_epu8 (__m256i a, __m256i b, int imm8) VDBPSADBW ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 |
Ternary |
__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Ternary |
__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Ternary |
__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 |
Ternary |
__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 |
Ternary |
__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Ternary |
__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
Ternary |
__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs. |
Ternary |
__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 |
Ternary |
__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 |
ToString() |
Returns a string that represents the current object. (Inherited from Object) |
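For the packed approximation and scaling helpers above, a minimal sketch under the same IsSupported guard (sample values and console output are illustrative; Reciprocal14 and Scale are the members documented in this table):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class Avx10v1MathExample
{
    static void Main()
    {
        if (!Avx10v1.IsSupported)
        {
            Console.WriteLine("AVX10.1 is not supported on this CPU.");
            return;
        }

        Vector128<float> x = Vector128.Create(2.0f, 4.0f, 8.0f, 0.5f);

        // Reciprocal14 (VRCP14PS) approximates 1/x with a relative error of at most 2^-14.
        Vector128<float> reciprocal = Avx10v1.Reciprocal14(x);

        // Scale (VSCALEFPS) computes x * 2^floor(y) per element; with y = 1.0f each lane doubles.
        Vector128<float> doubled = Avx10v1.Scale(x, Vector128.Create(1.0f));

        Console.WriteLine($"1/x ~ {reciprocal}");
        Console.WriteLine($"2*x = {doubled}");
    }
}
```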
Applies to

Product | Versions
---|---
.NET | 9