AdvSimd Class
Definition
Important
Some information relates to prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here.
Important
This API is not CLS-compliant.
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics.
public ref class AdvSimd abstract : System::Runtime::Intrinsics::Arm::ArmBase
[System.CLSCompliant(false)]
public abstract class AdvSimd : System.Runtime.Intrinsics.Arm.ArmBase
[<System.CLSCompliant(false)>]
type AdvSimd = class
inherit ArmBase
Public MustInherit Class AdvSimd
Inherits ArmBase
- Inheritance
- Derived
- Attributes
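The short sketch below is not part of the reference tables; it is a minimal, illustrative example (assuming a runtime that exposes the System.Runtime.Intrinsics APIs, such as .NET 5 or later) of how a couple of the methods listed further down are called on Vector128 values. The type and variable names are ours.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class AdvSimdExample
{
    static void Main()
    {
        // The intrinsic path must only run where the hardware supports AdvSIMD.
        if (!AdvSimd.IsSupported)
        {
            Console.WriteLine("AdvSIMD is not available on this CPU.");
            return;
        }

        // Two 128-bit vectors of four 32-bit integers each.
        Vector128<int> left = Vector128.Create(1, -2, 3, -4);
        Vector128<int> right = Vector128.Create(10, 20, 30, 40);

        // Element-wise addition: ADD Vd.4S, Vn.4S, Vm.4S on A64.
        Vector128<int> sum = AdvSimd.Add(left, right);

        // Element-wise absolute value of a float vector: FABS Vd.4S, Vn.4S on A64.
        Vector128<float> magnitudes = AdvSimd.Abs(Vector128.Create(-1.5f, 2.5f, -3.5f, 4.5f));

        Console.WriteLine(sum);        // <11, 18, 33, 36>
        Console.WriteLine(magnitudes); // <1.5, 2.5, 3.5, 4.5>
    }
}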
Properties
IsSupported |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
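As a hedged illustration of how IsSupported is typically consumed (the helper below and its name, SaturatingAddBytes, are ours and not part of this API): query the property at run time, take the intrinsic path when it returns true, and fall back to ordinary managed code otherwise.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

static class VectorFallbackExample
{
    // Adds two vectors of bytes, clamping each lane at 255.
    public static Vector128<byte> SaturatingAddBytes(Vector128<byte> a, Vector128<byte> b)
    {
        if (AdvSimd.IsSupported)
        {
            // Hardware path: UQADD Vd.16B, Vn.16B, Vm.16B on A64 (see AddSaturate below).
            return AdvSimd.AddSaturate(a, b);
        }

        // Managed fallback for CPUs without AdvSIMD support.
        Vector128<byte> result = a;
        for (int i = 0; i < Vector128<byte>.Count; i++)
        {
            int sum = a.GetElement(i) + b.GetElement(i);
            result = result.WithElement(i, (byte)Math.Min(sum, byte.MaxValue));
        }
        return result;
    }
}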
Methods
Abs(Vector128<Int16>) |
int16x8_t vabsq_s16 (int16x8_t a) A32: VABS. S16 Qd, Qm A64: ABS Vd.8H, Vn.8H |
Abs(Vector128<Int32>) |
int32x4_t vabsq_s32 (int32x4_t a) A32: VABS. S32 Qd, Qm A64: ABS Vd.4S, Vn.4S |
Abs(Vector128<SByte>) |
int8x16_t vabsq_s8 (int8x16_t a) A32: VABS. S8 Qd, Qm A64: ABS Vd.16B, Vn.16B |
Abs(Vector128<Single>) |
float32x4_t vabsq_f32 (float32x4_t a) A32: VABS. F32 Qd, Qm A64: FABS Vd.4S, Vn.4S |
Abs(Vector64<Int16>) |
int16x4_t vabs_s16 (int16x4_t a) A32: VABS. S16 Dd, Dm A64: ABS Vd.4H, Vn.4H |
Abs(Vector64<Int32>) |
int32x2_t vabs_s32 (int32x2_t a) A32: VABS. S32 Dd, Dm A64: ABS Vd.2S, Vn.2S |
Abs(Vector64<SByte>) |
int8x8_t vabs_s8 (int8x8_t a) A32: VABS. S8 Dd, Dm A64: ABS Vd.8B, Vn.8B |
Abs(Vector64<Single>) |
float32x2_t vabs_f32 (float32x2_t a) A32: VABS. F32 Dd, Dm A64: FABS Vd.2S, Vn.2S |
AbsoluteCompareGreaterThan(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcagtq_f32 (float32x4_t a, float32x4_t b) A32: VACGT.F32 Qd, Qn, Qm A64: FACGT Vd.4S, Vn.4S, Vm.4S |
AbsoluteCompareGreaterThan(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcagt_f32 (float32x2_t a, float32x2_t b) A32: VACGT. F32 Dd, Dn, Dm A64: FACGT Vd.2S, Vn.2S, Vm.2S |
AbsoluteCompareGreaterThanOrEqual(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcageq_f32 (float32x4_t a, float32x4_t b) A32: VACGE.F32 Qd, Qn, Qm A64: FACGE Vd.4S, Vn.4S, Vm.4S |
AbsoluteCompareGreaterThanOrEqual(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcage_f32 (float32x2_t a, float32x2_t b) A32: VACGE. F32 Dd, Dn, Dm A64: FACGE Vd.2S, Vn.2S, Vm.2S |
AbsoluteCompareLessThan(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcaltq_f32 (float32x4_t a, float32x4_t b) A32: VACLT. F32 Qd, Qn, Qm A64: FACGT Vd.4S, Vn.4S, Vm.4S |
AbsoluteCompareLessThan(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcalt_f32 (float32x2_t a, float32x2_t b) A32: VACLT. F32 Dd, Dn, Dm A64: FACGT Vd.2S, Vn.2S, Vm.2S |
AbsoluteCompareLessThanOrEqual(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcaleq_f32 (float32x4_t a, float32x4_t b) A32: VACLE. F32 Qd, Qn, Qm A64: FACGE Vd.4S, Vn.4S, Vm.4S |
AbsoluteCompareLessThanOrEqual(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcale_f32 (float32x2_t a, float32x2_t b) A32: VACLE. F32 Dd, Dn, Dm A64: FACGE Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifference(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vabdq_u8 (uint8x16_t a, uint8x16_t b) A32: VABD. U8 Qd, Qn, Qm A64: UABD Vd.16B, Vn.16B, Vm.16B |
AbsoluteDifference(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vabdq_s16 (int16x8_t a, int16x8_t b) A32: VABD. S16 Qd, Qn, Qm A64: SABD Vd.8H, Vn.8H, Vm.8H |
AbsoluteDifference(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vabdq_s32 (int32x4_t a, int32x4_t b) A32: VABD.S32 Qd, Qn, Qm A64: SABD Vd.4S, Vn.4S, Vm.4S |
AbsoluteDifference(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vabdq_s8 (int8x16_t a, int8x16_t b) A32: VABD. S8 Qd, Qn, Qm A64: SABD Vd.16B, Vn.16B, Vm.16B |
AbsoluteDifference(Vector128<Single>, Vector128<Single>) |
float32x4_t vabdq_f32 (float32x4_t a, float32x4_t b) A32: VABD. F32 Qd, Qn, Qm A64: FABD Vd.4S, Vn.4S, Vm.4S |
AbsoluteDifference(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vabdq_u16 (uint16x8_t a, uint16x8_t b) A32: VABD. U16 Qd, Qn, Qm A64: UABD Vd.8H, Vn.8H, Vm.8H |
AbsoluteDifference(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vabdq_u32 (uint32x4_t a, uint32x4_t b) A32: VABD. U32 Qd, Qn, Qm A64: UABD Vd.4S, Vn.4S, Vm.4S |
AbsoluteDifference(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vabd_u8 (uint8x8_t a, uint8x8_t b) A32: VABD.U8 Dd, Dn, Dm A64: UABD Vd.8B, Vn.8B, Vm.8B |
AbsoluteDifference(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vabd_s16 (int16x4_t a, int16x4_t b) A32: VABD. S16 Dd, Dn, Dm A64: SABD Vd.4H, Vn.4H, Vm.4H |
AbsoluteDifference(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vabd_s32 (int32x2_t a, int32x2_t b) A32: VABD. S32 Dd, Dn, Dm A64: SABD Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifference(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vabd_s8 (int8x8_t a, int8x8_t b) A32: VABD. S8 Dd, Dn, Dm A64: SABD Vd.8B, Vn.8B, Vm.8B |
AbsoluteDifference(Vector64<Single>, Vector64<Single>) |
float32x2_t vabd_f32 (float32x2_t a, float32x2_t b) A32: VABD. F32 Dd, Dn, Dm A64: FABD Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifference(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vabd_u16 (uint16x4_t a, uint16x4_t b) A32: VABD.U16 Dd, Dn, Dm A64: UABD Vd.4H, Vn.4H, Vm.4H |
AbsoluteDifference(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vabd_u32 (uint32x2_t a, uint32x2_t b) A32: VABD. U32 Dd, Dn, Dm A64: UABD Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifferenceAdd(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) A32: VABA.U8 Qd, Qn, Qm A64: UABA Vd.16B, Vn.16B, Vm.16B |
AbsoluteDifferenceAdd(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>) |
int16x8_t vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) A32: VABA.S16 Qd, Qn, Qm A64: SABA Vd.8H, Vn.8H, Vm.8H |
AbsoluteDifferenceAdd(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>) |
int32x4_t vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) A32: VABA.S32 Qd, Qn, Qm A64: SABA Vd.4S, Vn.4S, Vm.4S |
AbsoluteDifferenceAdd(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>) |
int8x16_t vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) A32: VABA.S8 Qd, Qn, Qm A64: SABA Vd.16B, Vn.16B, Vm.16B |
AbsoluteDifferenceAdd(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) A32: VABA.U16 Qd, Qn, Qm A64: UABA Vd.8H, Vn.8H, Vm.8H |
AbsoluteDifferenceAdd(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) A32: VABA.U32 Qd, Qn, Qm A64: UABA Vd.4S, Vn.4S, Vm.4S |
AbsoluteDifferenceAdd(Vector64<Byte>, Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) A32: VABA.U8 Dd, Dn, Dm A64: UABA Vd.8B, Vn.8B, Vm.8B |
AbsoluteDifferenceAdd(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c) A32: VABA.S16 Dd, Dn, Dm A64: SABA Vd.4H, Vn.4H, Vm.4H |
AbsoluteDifferenceAdd(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c) A32: VABA.S32 Dd, Dn, Dm A64: SABA Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifferenceAdd(Vector64<SByte>, Vector64<SByte>, Vector64<SByte>) |
int8x8_t vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c) A32: VABA.S8 Dd, Dn, Dm A64: SABA Vd.8B, Vn.8B, Vm.8B |
AbsoluteDifferenceAdd(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) A32: VABA.U16 Dd, Dn, Dm A64: UABA Vd.4H, Vn.4H, Vm.4H |
AbsoluteDifferenceAdd(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) A32: VABA.U32 Dd, Dn, Dm A64: UABA Vd.2S, Vn.2S, Vm.2S |
AbsoluteDifferenceWideningLower(Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vabdl_u8 (uint8x8_t a, uint8x8_t b) A32: VABDL. U8 Qd, Dn, Dm A64: UABDL Vd.8H, Vn.8B, Vm.8B |
AbsoluteDifferenceWideningLower(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vabdl_s16 (int16x4_t a, int16x4_t b) A32: VABDL. S16 Qd, Dn, Dm A64: SABDL Vd.4S, Vn.4H, Vm.4H |
AbsoluteDifferenceWideningLower(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vabdl_s32 (int32x2_t a, int32x2_t b) A32: VABDL. S32 Qd, Dn, Dm A64: SABDL Vd.2D, Vn.2S, Vm.2S |
AbsoluteDifferenceWideningLower(Vector64<SByte>, Vector64<SByte>) |
int16x8_t vabdl_s8 (int8x8_t a, int8x8_t b) A32: VABDL. S8 Qd, Dn, Dm A64: SABDL Vd.8H, Vn.8B, Vm.8B |
AbsoluteDifferenceWideningLower(Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vabdl_u16 (uint16x4_t a, uint16x4_t b) A32: VABDL. U16 Qd, Dn, Dm A64: UABDL Vd.4S, Vn.4H, Vm.4H |
AbsoluteDifferenceWideningLower(Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vabdl_u32 (uint32x2_t a, uint32x2_t b) A32: VABDL. U32 Qd, Dn, Dm A64: UABDL Vd.2D, Vn.2S, Vm.2S |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<Int16>, Vector64<SByte>, Vector64<SByte>) |
int16x8_t vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) A32: VABAL.S8 Qd, Dn, Dm A64: SABAL Vd.8H, Vn.8B, Vm.8B |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VABAL.S16 Qd, Dn, Dm A64: SABAL Vd.4S, Vn.4H, Vm.4H |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VABAL.S32 Qd, Dn, Dm A64: SABAL Vd.2D, Vn.2S, Vm.2S |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<UInt16>, Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) A32: VABAL.U8 Qd, Dn, Dm A64: UABAL Vd.8H, Vn.8B, Vm.8B |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<UInt32>, Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) A32: VABAL.U16 Qd, Dn, Dm A64: UABAL Vd.4S, Vn.4H, Vm.4H |
AbsoluteDifferenceWideningLowerAndAdd(Vector128<UInt64>, Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) A32: VABAL.U32 Qd, Dn, Dm A64: UABAL Vd.2D, Vn.2S, Vm.2S |
AbsoluteDifferenceWideningUpper(Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vabdl_high_u8 (uint8x16_t a, uint8x16_t b) A32: VABDL. U8 Qd, Dn+1, Dm+1 A64: UABDL2 Vd.8H, Vn.16B, Vm.16B |
AbsoluteDifferenceWideningUpper(Vector128<Int16>, Vector128<Int16>) |
int32x4_t vabdl_high_s16 (int16x8_t a, int16x8_t b) A32: VABDL. S16 Qd, Dn+1, Dm+1 A64: SABDL2 Vd.4S, Vn.8H, Vm.8H |
AbsoluteDifferenceWideningUpper(Vector128<Int32>, Vector128<Int32>) |
int64x2_t vabdl_high_s32 (int32x4_t a, int32x4_t b) A32: VABDL. S32 Qd, Dn+1, Dm+1 A64: SABDL2 Vd.2D, Vn.4S, Vm.4S |
AbsoluteDifferenceWideningUpper(Vector128<SByte>, Vector128<SByte>) |
int16x8_t vabdl_high_s8 (int8x16_t a, int8x16_t b) A32: VABDL. S8 Qd, Dn+1, Dm+1 A64: SABDL2 Vd.8H, Vn.16B, Vm.16B |
AbsoluteDifferenceWideningUpper(Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vabdl_high_u16 (uint16x8_t a, uint16x8_t b) A32: VABDL. U16 Qd, Dn+1, Dm+1 A64: UABDL2 Vd.4S, Vn.8H, Vm.8H |
AbsoluteDifferenceWideningUpper(Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vabdl_high_u32 (uint32x4_t a, uint32x4_t b) A32: VABDL. U32 Qd, Dn+1, Dm+1 A64: UABDL2 Vd.2D, Vn.4S, Vm.4S |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<Int16>, Vector128<SByte>, Vector128<SByte>) |
int16x8_t vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) A32: VABAL.S8 Qd, Dn+1, Dm+1 A64: SABAL2 Vd.8H, Vn.16B, Vm.16B |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>) |
int32x4_t vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VABAL.S16 Qd, Dn+1, Dm+1 A64: SABAL2 Vd.4S, Vn.8H, Vm.8H |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>) |
int64x2_t vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VABAL.S32 Qd, Dn+1, Dm+1 A64: SABAL2 Vd.2D, Vn.4S, Vm.4S |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<UInt16>, Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) A32: VABAL.U8 Qd, Dn+1, Dm+1 A64: UABAL2 Vd.8H, Vn.16B, Vm.16B |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<UInt32>, Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) A32: VABAL.U16 Qd, Dn+1, Dm+1 A64: UABAL2 Vd.4S, Vn.8H, Vm.8H |
AbsoluteDifferenceWideningUpperAndAdd(Vector128<UInt64>, Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) A32: VABAL.U32 Qd, Dn+1, Dm+1 A64: UABAL2 Vd.2D, Vn.4S, Vm.4S |
AbsSaturate(Vector128<Int16>) |
int16x8_t vqabsq_s16 (int16x8_t a) A32: VQABS.S16 Qd, Qm A64: SQABS Vd.8H, Vn.8H |
AbsSaturate(Vector128<Int32>) |
int32x4_t vqabsq_s32 (int32x4_t a) A32: VQABS. S32 Qd, Qm A64: SQABS Vd.4S, Vn.4S |
AbsSaturate(Vector128<SByte>) |
int8x16_t vqabsq_s8 (int8x16_t a) A32: VQABS. S8 Qd, Qm A64: SQABS Vd.16B, Vn.16B |
AbsSaturate(Vector64<Int16>) |
int16x4_t vqabs_s16 (int16x4_t a) A32: VQABS. S16 Dd, Dm A64: SQABS Vd.4H, Vn.4H |
AbsSaturate(Vector64<Int32>) |
int32x2_t vqabs_s32 (int32x2_t a) A32: VQABS. S32 Dd, Dm A64: SQABS Vd.2S, Vn.2S |
AbsSaturate(Vector64<SByte>) |
int8x8_t vqabs_s8 (int8x8_t a) A32: VQABS. S8 Dd, Dm A64: SQABS Vd.8B, Vn.8B |
AbsScalar(Vector64<Double>) |
float64x1_t vabs_f64 (float64x1_t a) A32: VABS. F64 Dd, Dm A64: FABS Dd, Dn |
AbsScalar(Vector64<Single>) |
float32_t vabss_f32 (float32_t a) A32: VABS.F32 Sd, Sm A64: FABS Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Add(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vaddq_u8 (uint8x16_t a, uint8x16_t b) A32: VADD. I8 Qd, Qn, Qm A64: ADD Vd.16B, Vn.16B, Vm.16B |
Add(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vaddq_s16 (int16x8_t a, int16x8_t b) A32: VADD. I16 Qd, Qn, Qm A64: ADD Vd.8H, Vn.8H, Vm.8H |
Add(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vaddq_s32 (int32x4_t a, int32x4_t b) A32: VADD.I32 Qd, Qn, Qm A64: ADD Vd.4S, Vn.4S, Vm.4S |
Add(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vaddq_s64 (int64x2_t a, int64x2_t b) A32: VADD.I64 Qd, Qn, Qm A64: ADD Vd.2D, Vn.2D, Vm.2D |
Add(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vaddq_s8 (int8x16_t a, int8x16_t b) A32: VADD. I8 Qd, Qn, Qm A64: ADD Vd.16B, Vn.16B, Vm.16B |
Add(Vector128<Single>, Vector128<Single>) |
float32x4_t vaddq_f32 (float32x4_t a, float32x4_t b) A32: VADD. F32 Qd, Qn, Qm A64: FADD Vd.4S, Vn.4S, Vm.4S |
Add(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vaddq_u16 (uint16x8_t a, uint16x8_t b) A32: VADD. I16 Qd, Qn, Qm A64: ADD Vd.8H, Vn.8H, Vm.8H |
Add(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vaddq_u32 (uint32x4_t a, uint32x4_t b) A32: VADD. I32 Qd, Qn, Qm A64: ADD Vd.4S, Vn.4S, Vm.4S |
Add(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vaddq_u64 (uint64x2_t a, uint64x2_t b) A32: VADD. I64 Qd, Qn, Qm A64: ADD Vd.2D, Vn.2D, Vm.2D |
Add(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vadd_u8 (uint8x8_t a, uint8x8_t b) A32: VADD. I8 Dd, Dn, Dm A64: ADD Vd.8B, Vn.8B, Vm.8B |
Add(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vadd_s16 (int16x4_t a, int16x4_t b) A32: VADD. I16 Dd, Dn, Dm A64: ADD Vd.4H, Vn.4H, Vm.4H |
Add(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vadd_s32 (int32x2_t a, int32x2_t b) A32: VADD. I32 Dd, Dn, Dm A64: ADD Vd.2S, Vn.2S, Vm.2S |
Add(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vadd_s8 (int8x8_t a, int8x8_t b) A32: VADD. I8 Dd, Dn, Dm A64: ADD Vd.8B, Vn.8B, Vm.8B |
Add(Vector64<Single>, Vector64<Single>) |
float32x2_t vadd_f32 (float32x2_t a, float32x2_t b) A32: VADD. F32 Dd, Dn, Dm A64: FADD Vd.2S, Vn.2S, Vm.2S |
Add(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vadd_u16 (uint16x4_t a, uint16x4_t b) A32: VADD. I16 Dd, Dn, Dm A64: ADD Vd.4H, Vn.4H, Vm.4H |
Add(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vadd_u32 (uint32x2_t a, uint32x2_t b) A32: VADD. I32 Dd, Dn, Dm A64: ADD Vd.2S, Vn.2S, Vm.2S |
AddHighNarrowingLower(Vector128<Int16>, Vector128<Int16>) |
int8x8_t vaddhn_s16 (int16x8_t a, int16x8_t b) A32: VADDHN. I16 Dd, Qn, Qm A64: ADDHN Vd.8B, Vn.8H, Vm.8H |
AddHighNarrowingLower(Vector128<Int32>, Vector128<Int32>) |
int16x4_t vaddhn_s32 (int32x4_t a, int32x4_t b) A32: VADDHN. I32 Dd, Qn, Qm A64: ADDHN Vd.4H, Vn.4S, Vm.4S |
AddHighNarrowingLower(Vector128<Int64>, Vector128<Int64>) |
int32x2_t vaddhn_s64 (int64x2_t a, int64x2_t b) A32: VADDHN. I64 Dd, Qn, Qm A64: ADDHN Vd.2S, Vn.2D, Vm.2D |
AddHighNarrowingLower(Vector128<UInt16>, Vector128<UInt16>) |
uint8x8_t vaddhn_u16 (uint16x8_t a, uint16x8_t b) A32: VADDHN. I16 Dd, Qn, Qm A64: ADDHN Vd.8B, Vn.8H, Vm.8H |
AddHighNarrowingLower(Vector128<UInt32>, Vector128<UInt32>) |
uint16x4_t vaddhn_u32 (uint32x4_t a, uint32x4_t b) A32: VADDHN. I32 Dd, Qn, Qm A64: ADDHN Vd.4H, Vn.4S, Vm.4S |
AddHighNarrowingLower(Vector128<UInt64>, Vector128<UInt64>) |
uint32x2_t vaddhn_u64 (uint64x2_t a, uint64x2_t b) A32: VADDHN. I64 Dd, Qn, Qm A64: ADDHN Vd.2S, Vn.2D, Vm.2D |
AddHighNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Vector128<UInt16>) |
uint8x16_t vaddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) A32: VADDHN.I16 Dd+1, Qn, Qm A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H |
AddHighNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>) |
int16x8_t vaddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) A32: VADDHN. I32 Dd+1, Qn, Qm A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S |
AddHighNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Vector128<Int64>) |
int32x4_t vaddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) A32: VADDHN.I64 Dd+1, Qn, Qm A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D |
AddHighNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Vector128<Int16>) |
int8x16_t vaddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) A32: VADDHN.I16 Dd+1, Qn, Qm A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H |
AddHighNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Vector128<UInt32>) |
uint16x8_t vaddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) A32: VADDHN. I32 Dd+1, Qn, Qm A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S |
AddHighNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Vector128<UInt64>) |
uint32x4_t vaddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) A32: VADDHN.I64 Dd+1, Qn, Qm A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D |
AddPairwise(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vpadd_u8 (uint8x8_t a, uint8x8_t b) A32: VPADD. I8 Dd, Dn, Dm A64: ADDP Vd.8B, Vn.8B, Vm.8B |
AddPairwise(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vpadd_s16 (int16x4_t a, int16x4_t b) A32: VPADD. I16 Dd, Dn, Dm A64: ADDP Vd.4H, Vn.4H, Vm.4H |
AddPairwise(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vpadd_s32 (int32x2_t a, int32x2_t b) A32: VPADD. I32 Dd, Dn, Dm A64: ADDP Vd.2S, Vn.2S, Vm.2S |
AddPairwise(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vpadd_s8 (int8x8_t a, int8x8_t b) A32: VPADD. I8 Dd, Dn, Dm A64: ADDP Vd.8B, Vn.8B, Vm.8B |
AddPairwise(Vector64<Single>, Vector64<Single>) |
float32x2_t vpadd_f32 (float32x2_t a, float32x2_t b) A32: VPADD. F32 Dd, Dn, Dm A64: FADDP Vd.2S, Vn.2S, Vm.2S |
AddPairwise(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vpadd_u16 (uint16x4_t a, uint16x4_t b) A32: VPADD. I16 Dd, Dn, Dm A64: ADDP Vd.4H, Vn.4H, Vm.4H |
AddPairwise(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vpadd_u32 (uint32x2_t a, uint32x2_t b) A32: VPADD. I32 Dd, Dn, Dm A64: ADDP Vd.2S, Vn.2S, Vm.2S |
AddPairwiseWidening(Vector128<Byte>) |
uint16x8_t vpaddlq_u8 (uint8x16_t a) A32: VPADDL. U8 Qd, Qm A64: UADDLP Vd.8H, Vn.16B |
AddPairwiseWidening(Vector128<Int16>) |
int32x4_t vpaddlq_s16 (int16x8_t a) A32: VPADDL. S16 Qd, Qm A64: SADDLP Vd.4S, Vn.8H |
AddPairwiseWidening(Vector128<Int32>) |
int64x2_t vpaddlq_s32 (int32x4_t a) A32: VPADDL. S32 Qd, Qm A64: SADDLP Vd.2D, Vn.4S |
AddPairwiseWidening(Vector128<SByte>) |
int16x8_t vpaddlq_s8 (int8x16_t a) A32: VPADDL. S8 Qd, Qm A64: SADDLP Vd.8H, Vn.16B |
AddPairwiseWidening(Vector128<UInt16>) |
uint32x4_t vpaddlq_u16 (uint16x8_t a) A32: VPADDL. U16 Qd, Qm A64: UADDLP Vd.4S, Vn.8H |
AddPairwiseWidening(Vector128<UInt32>) |
uint64x2_t vpaddlq_u32 (uint32x4_t a) A32: VPADDL. U32 Qd, Qm A64: UADDLP Vd.2D, Vn.4S |
AddPairwiseWidening(Vector64<Byte>) |
uint16x4_t vpaddl_u8 (uint8x8_t a) A32: VPADDL. U8 Dd, Dm A64: UADDLP Vd.4H, Vn.8B |
AddPairwiseWidening(Vector64<Int16>) |
int32x2_t vpaddl_s16 (int16x4_t a) A32: VPADDL. S16 Dd, Dm A64: SADDLP Vd.2S, Vn.4H |
AddPairwiseWidening(Vector64<SByte>) |
int16x4_t vpaddl_s8 (int8x8_t a) A32: VPADDL. S8 Dd, Dm A64: SADDLP Vd.4H, Vn.8B |
AddPairwiseWidening(Vector64<UInt16>) |
uint32x2_t vpaddl_u16 (uint16x4_t a) A32: VPADDL. U16 Dd, Dm A64: UADDLP Vd.2S, Vn.4H |
AddPairwiseWideningAndAdd(Vector128<Int16>, Vector128<SByte>) |
int16x8_t vpadalq_s8 (int16x8_t a, int8x16_t b) A32: VPADAL. S8 Qd, Qm A64: SADALP Vd.8H, Vn.16B |
AddPairwiseWideningAndAdd(Vector128<Int32>, Vector128<Int16>) |
int32x4_t vpadalq_s16 (int32x4_t a, int16x8_t b) A32: VPADAL. S16 Qd, Qm A64: SADALP Vd.4S, Vn.8H |
AddPairwiseWideningAndAdd(Vector128<Int64>, Vector128<Int32>) |
int64x2_t vpadalq_s32 (int64x2_t a, int32x4_t b) A32: VPADAL. S32 Qd, Qm A64: SADALP Vd.2D, Vn.4S |
AddPairwiseWideningAndAdd(Vector128<UInt16>, Vector128<Byte>) |
uint16x8_t vpadalq_u8 (uint16x8_t a, uint8x16_t b) A32: VPADAL. U8 Qd, Qm A64: UADALP Vd.8H, Vn.16B |
AddPairwiseWideningAndAdd(Vector128<UInt32>, Vector128<UInt16>) |
uint32x4_t vpadalq_u16 (uint32x4_t a, uint16x8_t b) A32: VPADAL. U16 Qd, Qm A64: UADALP Vd.4S, Vn.8H |
AddPairwiseWideningAndAdd(Vector128<UInt64>, Vector128<UInt32>) |
uint64x2_t vpadalq_u32 (uint64x2_t a, uint32x4_t b) A32: VPADAL. U32 Qd, Qm A64: UADALP Vd.2D, Vn.4S |
AddPairwiseWideningAndAdd(Vector64<Int16>, Vector64<SByte>) |
int16x4_t vpadal_s8 (int16x4_t a, int8x8_t b) A32: VPADAL. S8 Dd, Dm A64: SADALP Vd.4H, Vn.8B |
AddPairwiseWideningAndAdd(Vector64<Int32>, Vector64<Int16>) |
int32x2_t vpadal_s16 (int32x2_t a, int16x4_t b) A32: VPADAL. S16 Dd, Dm A64: SADALP Vd.2S, Vn.4H |
AddPairwiseWideningAndAdd(Vector64<UInt16>, Vector64<Byte>) |
uint16x4_t vpadal_u8 (uint16x4_t a, uint8x8_t b) A32: VPADAL. U8 Dd, Dm A64: UADALP Vd.4H, Vn.8B |
AddPairwiseWideningAndAdd(Vector64<UInt32>, Vector64<UInt16>) |
uint32x2_t vpadal_u16 (uint32x2_t a, uint16x4_t b) A32: VPADAL. U16 Dd, Dm A64: UADALP Vd.2S, Vn.4H |
AddPairwiseWideningAndAddScalar(Vector64<Int64>, Vector64<Int32>) |
int64x1_t vpadal_s32 (int64x1_t a, int32x2_t b) A32: VPADAL.S32 Dd, Dm A64: SADALP Vd.1D, Vn.2S |
AddPairwiseWideningAndAddScalar(Vector64<UInt64>, Vector64<UInt32>) |
uint64x1_t vpadal_u32 (uint64x1_t a, uint32x2_t b) A32: VPADAL.U32 Dd, Dm A64: UADALP Vd.1D, Vn.2S |
AddPairwiseWideningScalar(Vector64<Int32>) |
int64x1_t vpaddl_s32 (int32x2_t a) A32: VPADDL. S32 Dd, Dm A64: SADDLP Dd, Vn.2S |
AddPairwiseWideningScalar(Vector64<UInt32>) |
uint64x1_t vpaddl_u32 (uint32x2_t a) A32: VPADDL. U32 Dd, Dm A64: UADDLP Dd, Vn.2S |
AddRoundedHighNarrowingLower(Vector128<Int16>, Vector128<Int16>) |
int8x8_t vraddhn_s16 (int16x8_t a, int16x8_t b) A32: VRADDHN. I16 Dd, Qn, Qm A64: RADDHN Vd.8B, Vn.8H, Vm.8H |
AddRoundedHighNarrowingLower(Vector128<Int32>, Vector128<Int32>) |
int16x4_t vraddhn_s32 (int32x4_t a, int32x4_t b) A32: VRADDHN. I32 Dd, Qn, Qm A64: RADDHN Vd.4H, Vn.4S, Vm.4S |
AddRoundedHighNarrowingLower(Vector128<Int64>, Vector128<Int64>) |
int32x2_t vraddhn_s64 (int64x2_t a, int64x2_t b) A32: VRADDHN. I64 Dd, Qn, Qm A64: RADDHN Vd.2S, Vn.2D, Vm.2D |
AddRoundedHighNarrowingLower(Vector128<UInt16>, Vector128<UInt16>) |
uint8x8_t vraddhn_u16 (uint16x8_t a, uint16x8_t b) A32: VRADDHN. I16 Dd, Qn, Qm A64: RADDHN Vd.8B, Vn.8H, Vm.8H |
AddRoundedHighNarrowingLower(Vector128<UInt32>, Vector128<UInt32>) |
uint16x4_t vraddhn_u32 (uint32x4_t a, uint32x4_t b) A32: VRADDHN. I32 Dd, Qn, Qm A64: RADDHN Vd.4H, Vn.4S, Vm.4S |
AddRoundedHighNarrowingLower(Vector128<UInt64>, Vector128<UInt64>) |
uint32x2_t vraddhn_u64 (uint64x2_t a, uint64x2_t b) A32: VRADDHN. I64 Dd, Qn, Qm A64: RADDHN Vd.2S, Vn.2D, Vm.2D |
AddRoundedHighNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Vector128<UInt16>) |
uint8x16_t vraddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) A32: VRADDHN.I16 Dd+1, Qn, Qm A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H |
AddRoundedHighNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>) |
int16x8_t vraddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) A32: VRADDHN. I32 Dd+1, Qn, Qm A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S |
AddRoundedHighNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Vector128<Int64>) |
int32x4_t vraddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) A32: VRADDHN.I64 Dd+1, Qn, Qm A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D |
AddRoundedHighNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Vector128<Int16>) |
int8x16_t vraddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) A32: VRADDHN.I16 Dd+1, Qn, Qm A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H |
AddRoundedHighNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Vector128<UInt32>) |
uint16x8_t vraddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) A32: VRADDHN. I32 Dd+1, Qn, Qm A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S |
AddRoundedHighNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Vector128<UInt64>) |
uint32x4_t vraddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) A32: VRADDHN.I64 Dd+1, Qn, Qm A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D |
AddSaturate(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vqaddq_u8 (uint8x16_t a, uint8x16_t b) A32: VQADD. U8 Qd, Qn, Qm A64: UQADD Vd.16B, Vn.16B, Vm.16B |
AddSaturate(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqaddq_s16 (int16x8_t a, int16x8_t b) A32: VQADD. S16 Qd, Qn, Qm A64: SQADD Vd.8H, Vn.8H, Vm.8H |
AddSaturate(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqaddq_s32 (int32x4_t a, int32x4_t b) A32: VQADD. S32 Qd, Qn, Qm A64: SQADD Vd.4S, Vn.4S, Vm.4S |
AddSaturate(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vqaddq_s64 (int64x2_t a, int64x2_t b) A32: VQADD. S64 Qd, Qn, Qm A64: SQADD Vd.2D, Vn.2D, Vm.2D |
AddSaturate(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vqaddq_s8 (int8x16_t a, int8x16_t b) A32: VQADD. S8 Qd, Qn, Qm A64: SQADD Vd.16B, Vn.16B, Vm.16B |
AddSaturate(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vqaddq_u16 (uint16x8_t a, uint16x8_t b) A32: VQADD. U16 Qd, Qn, Qm A64: UQADD Vd.8H, Vn.8H, Vm.8H |
AddSaturate(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vqaddq_u32 (uint32x4_t a, uint32x4_t b) A32: VQADD.U32 Qd, Qn, Qm A64: UQADD Vd.4S, Vn.4S, Vm.4S |
AddSaturate(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vqaddq_u64 (uint64x2_t a, uint64x2_t b) A32: VQADD. U64 Qd, Qn, Qm A64: UQADD Vd.2D, Vn.2D, Vm.2D |
AddSaturate(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vqadd_u8 (uint8x8_t a, uint8x8_t b) A32: VQADD. U8 Dd, Dn, Dm A64: UQADD Vd.8B, Vn.8B, Vm.8B |
AddSaturate(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqadd_s16 (int16x4_t a, int16x4_t b) A32: VQADD. S16 Dd, Dn, Dm A64: SQADD Vd.4H, Vn.4H, Vm.4H |
AddSaturate(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqadd_s32 (int32x2_t a, int32x2_t b) A32: VQADD. S32 Dd, Dn, Dm A64: SQADD Vd.2S, Vn.2S, Vm.2S |
AddSaturate(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vqadd_s8 (int8x8_t a, int8x8_t b) A32: VQADD. S8 Dd, Dn, Dm A64: SQADD Vd.8B, Vn.8B, Vm.8B |
AddSaturate(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vqadd_u16 (uint16x4_t a, uint16x4_t b) A32: VQADD. U16 Dd, Dn, Dm A64: UQADD Vd.4H, Vn.4H, Vm.4H |
AddSaturate(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vqadd_u32 (uint32x2_t a, uint32x2_t b) A32: VQADD. U32 Dd, Dn, Dm A64: UQADD Vd.2S, Vn.2S, Vm.2S |
AddSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vqadd_s64 (int64x1_t a, int64x1_t b) A32: VQADD. S64 Dd, Dn, Dm A64: SQADD Dd, Dn, Dm |
AddSaturateScalar(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vqadd_u64 (uint64x1_t a, uint64x1_t b) A32: VQADD. U64 Dd, Dn, Dm A64: UQADD Dd, Dn, Dm |
AddScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vadd_f64 (float64x1_t a, float64x1_t b) A32: VADD. F64 Dd, Dn, Dm A64: FADD Dd, Dn, Dm |
AddScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vadd_s64 (int64x1_t a, int64x1_t b) A32: VADD. I64 Dd, Dn, Dm A64: ADD Dd, Dn, Dm |
AddScalar(Vector64<Single>, Vector64<Single>) |
float32_t vadds_f32 (float32_t a, float32_t b) A32: VADD.F32 Sd, Sn, Sm A64: FADD Sd, Sn, Sm The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
AddScalar(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vadd_u64 (uint64x1_t a, uint64x1_t b) A32: VADD. I64 Dd, Dn, Dm A64: ADD Dd, Dn, Dm |
AddWideningLower(Vector128<Int16>, Vector64<SByte>) |
int16x8_t vaddw_s8 (int16x8_t a, int8x8_t b) A32: VADDW. S8 Qd, Qn, Dm A64: SADDW Vd.8H, Vn.8H, Vm.8B |
AddWideningLower(Vector128<Int32>, Vector64<Int16>) |
int32x4_t vaddw_s16 (int32x4_t a, int16x4_t b) A32: VADDW. S16 Qd, Qn, Dm A64: SADDW Vd.4S, Vn.4S, Vm.4H |
AddWideningLower(Vector128<Int64>, Vector64<Int32>) |
int64x2_t vaddw_s32 (int64x2_t a, int32x2_t b) A32: VADDW. S32 Qd, Qn, Dm A64: SADDW Vd.2D, Vn.2D, Vm.2S |
AddWideningLower(Vector128<UInt16>, Vector64<Byte>) |
uint16x8_t vaddw_u8 (uint16x8_t a, uint8x8_t b) A32: VADDW. U8 Qd, Qn, Dm A64: UADDW Vd.8H, Vn.8H, Vm.8B |
AddWideningLower(Vector128<UInt32>, Vector64<UInt16>) |
uint32x4_t vaddw_u16 (uint32x4_t a, uint16x4_t b) A32: VADDW. U16 Qd, Qn, Dm A64: UADDW Vd.4S, Vn.4S, Vm.4H |
AddWideningLower(Vector128<UInt64>, Vector64<UInt32>) |
uint64x2_t vaddw_u32 (uint64x2_t a, uint32x2_t b) A32: VADDW. U32 Qd, Qn, Dm A64: UADDW Vd.2D, Vn.2D, Vm.2S |
AddWideningLower(Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vaddl_u8 (uint8x8_t a, uint8x8_t b) A32: VADDL. U8 Qd, Dn, Dm A64: UADDL Vd.8H, Vn.8B, Vm.8B |
AddWideningLower(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vaddl_s16 (int16x4_t a, int16x4_t b) A32: VADDL. S16 Qd, Dn, Dm A64: SADDL Vd.4S, Vn.4H, Vm.4H |
AddWideningLower(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vaddl_s32 (int32x2_t a, int32x2_t b) A32: VADDL. S32 Qd, Dn, Dm A64: SADDL Vd.2D, Vn.2S, Vm.2S |
AddWideningLower(Vector64<SByte>, Vector64<SByte>) |
int16x8_t vaddl_s8 (int8x8_t a, int8x8_t b) A32: VADDL. S8 Qd, Dn, Dm A64: SADDL Vd.8H, Vn.8B, Vm.8B |
AddWideningLower(Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vaddl_u16 (uint16x4_t a, uint16x4_t b) A32: VADDL. U16 Qd, Dn, Dm A64: UADDL Vd.4S, Vn.4H, Vm.4H |
AddWideningLower(Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vaddl_u32 (uint32x2_t a, uint32x2_t b) A32: VADDL. U32 Qd, Dn, Dm A64: UADDL Vd.2D, Vn.2S, Vm.2S |
AddWideningUpper(Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vaddl_high_u8 (uint8x16_t a, uint8x16_t b) A32: VADDL.U8 Qd, Dn+1, Dm+1 A64: UADDL2 Vd.8H, Vn.16B, Vm.16B |
AddWideningUpper(Vector128<Int16>, Vector128<Int16>) |
int32x4_t vaddl_high_s16 (int16x8_t a, int16x8_t b) A32: VADDL. S16 Qd, Dn+1, Dm+1 A64: SADDL2 Vd.4S, Vn.8H, Vm.8H |
AddWideningUpper(Vector128<Int16>, Vector128<SByte>) |
int16x8_t vaddw_high_s8 (int16x8_t a, int8x16_t b) A32: VADDW. S8 Qd, Qn, Dm+1 A64: SADDW2 Vd.8H, Vn.8H, Vm.16B |
AddWideningUpper(Vector128<Int32>, Vector128<Int16>) |
int32x4_t vaddw_high_s16 (int32x4_t a, int16x8_t b) A32: VADDW. S16 Qd, Qn, Dm+1 A64: SADDW2 Vd.4S, Vn.4S, Vm.8H |
AddWideningUpper(Vector128<Int32>, Vector128<Int32>) |
int64x2_t vaddl_high_s32 (int32x4_t a, int32x4_t b) A32: VADDL. S32 Qd, Dn+1, Dm+1 A64: SADDL2 Vd.2D, Vn.4S, Vm.4S |
AddWideningUpper(Vector128<Int64>, Vector128<Int32>) |
int64x2_t vaddw_high_s32 (int64x2_t a, int32x4_t b) A32: VADDW. S32 Qd, Qn, Dm+1 A64: SADDW2 Vd.2D, Vn.2D, Vm.4S |
AddWideningUpper(Vector128<SByte>, Vector128<SByte>) |
int16x8_t vaddl_high_s8 (int8x16_t a, int8x16_t b) A32: VADDL. S8 Qd, Dn+1, Dm+1 A64: SADDL2 Vd.8H, Vn.16B, Vm.16B |
AddWideningUpper(Vector128<UInt16>, Vector128<Byte>) |
uint16x8_t vaddw_high_u8 (uint16x8_t a, uint8x16_t b) A32: VADDW. U8 Qd, Qn, Dm+1 A64: UADDW2 Vd.8H, Vn.8H, Vm.16B |
AddWideningUpper(Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vaddl_high_u16 (uint16x8_t a, uint16x8_t b) A32: VADDL. U16 Qd, Dn+1, Dm+1 A64: UADDL2 Vd.4S, Vn.8H, Vm.8H |
AddWideningUpper(Vector128<UInt32>, Vector128<UInt16>) |
uint32x4_t vaddw_high_u16 (uint32x4_t a, uint16x8_t b) A32: VADDW. U16 Qd, Qn, Dm+1 A64: UADDW2 Vd.4S, Vn.4S, Vm.8H |
AddWideningUpper(Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vaddl_high_u32 (uint32x4_t a, uint32x4_t b) A32: VADDL. U32 Qd, Dn+1, Dm+1 A64: UADDL2 Vd.2D, Vn.4S, Vm.4S |
AddWideningUpper(Vector128<UInt64>, Vector128<UInt32>) |
uint64x2_t vaddw_high_u32 (uint64x2_t a, uint32x4_t b) A32: VADDW. U32 Qd, Qn, Dm+1 A64: UADDW2 Vd.2D, Vn.2D, Vm.4S |
And(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vandq_u8 (uint8x16_t a, uint8x16_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<Double>, Vector128<Double>) |
float64x2_t vandq_f64 (float64x2_t a, float64x2_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
And(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vandq_s16 (int16x8_t a, int16x8_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vandq_s32 (int32x4_t a, int32x4_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vandq_s64 (int64x2_t a, int64x2_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vandq_s8 (int8x16_t a, int8x16_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<Single>, Vector128<Single>) |
float32x4_t vandq_f32 (float32x4_t a, float32x4_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
And(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vandq_u16 (uint16x8_t a, uint16x8_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vandq_u32 (uint32x4_t a, uint32x4_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vandq_u64 (uint64x2_t a, uint64x2_t b) A32: VAND Qd, Qn, Qm A64: AND Vd.16B, Vn.16B, Vm.16B |
And(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vand_u8 (uint8x8_t a, uint8x8_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<Double>, Vector64<Double>) |
float64x1_t vand_f64 (float64x1_t a, float64x1_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
And(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vand_s16 (int16x4_t a, int16x4_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vand_s32 (int32x2_t a, int32x2_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vand_s64 (int64x1_t a, int64x1_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vand_s8 (int8x8_t a, int8x8_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<Single>, Vector64<Single>) |
float32x2_t vand_f32 (float32x2_t a, float32x2_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
And(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vand_u16 (uint16x4_t a, uint16x4_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vand_u32 (uint32x2_t a, uint32x2_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
And(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vand_u64 (uint64x1_t a, uint64x1_t b) A32: VAND Dd, Dn, Dm A64: AND Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vbicq_u8 (uint8x16_t a, uint8x16_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<Double>, Vector128<Double>) |
float64x2_t vbicq_f64 (float64x2_t a, float64x2_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
BitwiseClear(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vbicq_s16 (int16x8_t a, int16x8_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vbicq_s32 (int32x4_t a, int32x4_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vbicq_s64 (int64x2_t a, int64x2_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vbicq_s8 (int8x16_t a, int8x16_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<Single>, Vector128<Single>) |
float32x4_t vbicq_f32 (float32x4_t a, float32x4_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
BitwiseClear(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vbicq_u16 (uint16x8_t a, uint16x8_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vbicq_u32 (uint32x4_t a, uint32x4_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vbicq_u64 (uint64x2_t a, uint64x2_t b) A32: VBIC Qd, Qn, Qm A64: BIC Vd.16B, Vn.16B, Vm.16B |
BitwiseClear(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vbic_u8 (uint8x8_t a, uint8x8_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<Double>, Vector64<Double>) |
float64x1_t vbic_f64 (float64x1_t a, float64x1_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
BitwiseClear(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vbic_s16 (int16x4_t a, int16x4_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vbic_s32 (int32x2_t a, int32x2_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vbic_s64 (int64x1_t a, int64x1_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vbic_s8 (int8x8_t a, int8x8_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<Single>, Vector64<Single>) |
float32x2_t vbic_f32 (float32x2_t a, float32x2_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
BitwiseClear(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vbic_u16 (uint16x4_t a, uint16x4_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vbic_u32 (uint32x2_t a, uint32x2_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseClear(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vbic_u64 (uint64x1_t a, uint64x1_t b) A32: VBIC Dd, Dn, Dm A64: BIC Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<Double>, Vector128<Double>, Vector128<Double>) |
float64x2_t vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>) |
int16x8_t vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>) |
int32x4_t vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>) |
int64x2_t vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>) |
int8x16_t vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<Single>, Vector128<Single>, Vector128<Single>) |
float32x4_t vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c) A32: VBSL Qd, Qn, Qm A64: BSL Vd.16B, Vn.16B, Vm.16B |
BitwiseSelect(Vector64<Byte>, Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<Double>, Vector64<Double>, Vector64<Double>) |
float64x1_t vbsl_f64 (uint64x1_t a, float64x1_t b, float64x1_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<Int64>, Vector64<Int64>, Vector64<Int64>) |
int64x1_t vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<SByte>, Vector64<SByte>, Vector64<SByte>) |
int8x8_t vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32x2_t vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
BitwiseSelect(Vector64<UInt64>, Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c) A32: VBSL Dd, Dn, Dm A64: BSL Vd.8B, Vn.8B, Vm.8B |
Ceiling(Vector128<Single>) |
float32x4_t vrndpq_f32 (float32x4_t a) A32: VRINTP. F32 Qd, Qm A64: FRINTP Vd.4S, Vn.4S |
Ceiling(Vector64<Single>) |
float32x2_t vrndp_f32 (float32x2_t a) A32: VRINTP. F32 Dd, Dm A64: FRINTP Vd.2S, Vn.2S |
CeilingScalar(Vector64<Double>) |
float64x1_t vrndp_f64 (float64x1_t a) A32: VRINTP. F64 Dd, Dm A64: FRINTP Dd, Dn |
CeilingScalar(Vector64<Single>) |
float32_t vrndps_f32 (float32_t a) A32: VRINTP.F32 Sd, Sm A64: FRINTP Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
CompareEqual(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vceqq_u8 (uint8x16_t a, uint8x16_t b) A32: VCEQ. I8 Qd, Qn, Qm A64: CMEQ Vd.16B, Vn.16B, Vm.16B |
CompareEqual(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vceqq_s16 (int16x8_t a, int16x8_t b) A32: VCEQ. I16 Qd, Qn, Qm A64: CMEQ Vd.8H, Vn.8H, Vm.8H |
CompareEqual(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vceqq_s32 (int32x4_t a, int32x4_t b) A32: VCEQ. I32 Qd, Qn, Qm A64: CMEQ Vd.4S, Vn.4S, Vm.4S |
CompareEqual(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vceqq_s8 (int8x16_t a, int8x16_t b) A32: VCEQ. I8 Qd, Qn, Qm A64: CMEQ Vd.16B, Vn.16B, Vm.16B |
CompareEqual(Vector128<Single>, Vector128<Single>) |
uint32x4_t vceqq_f32 (float32x4_t a, float32x4_t b) A32: VCEQ. F32 Qd, Qn, Qm A64: FCMEQ Vd.4S, Vn.4S, Vm.4S |
CompareEqual(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vceqq_u16 (uint16x8_t a, uint16x8_t b) A32: VCEQ. I16 Qd, Qn, Qm A64: CMEQ Vd.8H, Vn.8H, Vm.8H |
CompareEqual(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vceqq_u32 (uint32x4_t a, uint32x4_t b) A32: VCEQ. I32 Qd, Qn, Qm A64: CMEQ Vd.4S, Vn.4S, Vm.4S |
CompareEqual(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vceq_u8 (uint8x8_t a, uint8x8_t b) A32: VCEQ. I8 Dd, Dn, Dm A64: CMEQ Vd.8B, Vn.8B, Vm.8B |
CompareEqual(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vceq_s16 (int16x4_t a, int16x4_t b) A32: VCEQ. I16 Dd, Dn, Dm A64: CMEQ Vd.4H, Vn.4H, Vm.4H |
CompareEqual(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vceq_s32 (int32x2_t a, int32x2_t b) A32: VCEQ. I32 Dd, Dn, Dm A64: CMEQ Vd.2S, Vn.2S, Vm.2S |
CompareEqual(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vceq_s8 (int8x8_t a, int8x8_t b) A32: VCEQ. I8 Dd, Dn, Dm A64: CMEQ Vd.8B, Vn.8B, Vm.8B |
CompareEqual(Vector64<Single>, Vector64<Single>) |
uint32x2_t vceq_f32 (float32x2_t a, float32x2_t b) A32: VCEQ. F32 Dd, Dn, Dm A64: FCMEQ Vd.2S, Vn.2S, Vm.2S |
CompareEqual(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vceq_u16 (uint16x4_t a, uint16x4_t b) A32: VCEQ. I16 Dd, Dn, Dm A64: CMEQ Vd.4H, Vn.4H, Vm.4H |
CompareEqual(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vceq_u32 (uint32x2_t a, uint32x2_t b) A32: VCEQ. I32 Dd, Dn, Dm A64: CMEQ Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThan(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vcgtq_u8 (uint8x16_t a, uint8x16_t b) A32: VCGT. U8 Qd, Qn, Qm A64: CMHI Vd.16B, Vn.16B, Vm.16B |
CompareGreaterThan(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vcgtq_s16 (int16x8_t a, int16x8_t b) A32: VCGT.S16 Qd, Qn, Qm A64: CMGT Vd.8H, Vn.8H, Vm.8H |
CompareGreaterThan(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vcgtq_s32 (int32x4_t a, int32x4_t b) A32: VCGT. S32 Qd, Qn, Qm A64: CMGT Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThan(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vcgtq_s8 (int8x16_t a, int8x16_t b) A32: VCGT. S8 Qd, Qn, Qm A64: CMGT Vd.16B, Vn.16B, Vm.16B |
CompareGreaterThan(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcgtq_f32 (float32x4_t a, float32x4_t b) A32: VCGT. F32 Qd, Qn, Qm A64: FCMGT Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThan(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vcgtq_u16 (uint16x8_t a, uint16x8_t b) A32: VCGT. U16 Qd, Qn, Qm A64: CMHI Vd.8H, Vn.8H, Vm.8H |
CompareGreaterThan(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vcgtq_u32 (uint32x4_t a, uint32x4_t b) A32: VCGT. U32 Qd, Qn, Qm A64: CMHI Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThan(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vcgt_u8 (uint8x8_t a, uint8x8_t b) A32: VCGT. U8 Dd, Dn, Dm A64: CMHI Vd.8B, Vn.8B, Vm.8B |
CompareGreaterThan(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vcgt_s16 (int16x4_t a, int16x4_t b) A32: VCGT. S16 Dd, Dn, Dm A64: CMGT Vd.4H, Vn.4H, Vm.4H |
CompareGreaterThan(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vcgt_s32 (int32x2_t a, int32x2_t b) A32: VCGT. S32 Dd, Dn, Dm A64: CMGT Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThan(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vcgt_s8 (int8x8_t a, int8x8_t b) A32: VCGT. S8 Dd, Dn, Dm A64: CMGT Vd.8B, Vn.8B, Vm.8B |
CompareGreaterThan(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcgt_f32 (float32x2_t a, float32x2_t b) A32: VCGT. F32 Dd, Dn, Dm A64: FCMGT Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThan(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vcgt_u16 (uint16x4_t a, uint16x4_t b) A32: VCGT. U16 Dd, Dn, Dm A64: CMHI Vd.4H, Vn.4H, Vm.4H |
CompareGreaterThan(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vcgt_u32 (uint32x2_t a, uint32x2_t b) A32: VCGT. U32 Dd, Dn, Dm A64: CMHI Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThanOrEqual(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vcgeq_u8 (uint8x16_t a, uint8x16_t b) A32: VCGE. U8 Qd, Qn, Qm A64: CMHS Vd.16B, Vn.16B, Vm.16B |
CompareGreaterThanOrEqual(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vcgeq_s16 (int16x8_t a, int16x8_t b) A32: VCGE. S16 Qd, Qn, Qm A64: CMGE Vd.8H, Vn.8H, Vm.8H |
CompareGreaterThanOrEqual(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vcgeq_s32 (int32x4_t a, int32x4_t b) A32: VCGE. S32 Qd, Qn, Qm A64: CMGE Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThanOrEqual(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vcgeq_s8 (int8x16_t a, int8x16_t b) A32: VCGE. S8 Qd, Qn, Qm A64: CMGE Vd.16B, Vn.16B, Vm.16B |
CompareGreaterThanOrEqual(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcgeq_f32 (float32x4_t a, float32x4_t b) A32: VCGE. F32 Qd, Qn, Qm A64: FCMGE Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThanOrEqual(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vcgeq_u16 (uint16x8_t a, uint16x8_t b) A32: VCGE. U16 Qd, Qn, Qm A64: CMHS Vd.8H, Vn.8H, Vm.8H |
CompareGreaterThanOrEqual(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vcgeq_u32 (uint32x4_t a, uint32x4_t b) A32: VCGE.U32 Qd, Qn, Qm A64: CMHS Vd.4S, Vn.4S, Vm.4S |
CompareGreaterThanOrEqual(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vcge_u8 (uint8x8_t a, uint8x8_t b) A32: VCGE. U8 Dd, Dn, Dm A64: CMHS Vd.8B, Vn.8B, Vm.8B |
CompareGreaterThanOrEqual(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vcge_s16 (int16x4_t a, int16x4_t b) A32: VCGE. S16 Dd, Dn, Dm A64: CMGE Vd.4H, Vn.4H, Vm.4H |
CompareGreaterThanOrEqual(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vcge_s32 (int32x2_t a, int32x2_t b) A32: VCGE. S32 Dd, Dn, Dm A64: CMGE Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThanOrEqual(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vcge_s8 (int8x8_t a, int8x8_t b) A32: VCGE. S8 Dd, Dn, Dm A64: CMGE Vd.8B, Vn.8B, Vm.8B |
CompareGreaterThanOrEqual(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcge_f32 (float32x2_t a, float32x2_t b) A32: VCGE. F32 Dd, Dn, Dm A64: FCMGE Vd.2S, Vn.2S, Vm.2S |
CompareGreaterThanOrEqual(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vcge_u16 (uint16x4_t a, uint16x4_t b) A32: VCGE.U16 Dd, Dn, Dm A64: CMHS Vd.4H, Vn.4H, Vm.4H |
CompareGreaterThanOrEqual(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vcge_u32 (uint32x2_t a, uint32x2_t b) A32: VCGE. U32 Dd, Dn, Dm A64: CMHS Vd.2S, Vn.2S, Vm.2S |
CompareLessThan(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vcltq_u8 (uint8x16_t a, uint8x16_t b) A32: VCLT. U8 Qd, Qn, Qm A64: CMHI Vd.16B, Vn.16B, Vm.16B |
CompareLessThan(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vcltq_s16 (int16x8_t a, int16x8_t b) A32: VCLT. S16 Qd, Qn, Qm A64: CMGT Vd.8H, Vn.8H, Vm.8H |
CompareLessThan(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vcltq_s32 (int32x4_t a, int32x4_t b) A32: VCLT. S32 Qd, Qn, Qm A64: CMGT Vd.4S, Vn.4S, Vm.4S |
CompareLessThan(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vcltq_s8 (int8x16_t a, int8x16_t b) A32: VCLT.S8 Qd, Qn, Qm A64: CMGT Vd.16B, Vn.16B, Vm.16B |
CompareLessThan(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcltq_f32 (float32x4_t a, float32x4_t b) A32: VCLT. F32 Qd, Qn, Qm A64: FCMGT Vd.4S, Vn.4S, Vm.4S |
CompareLessThan(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vcltq_u16 (uint16x8_t a, uint16x8_t b) A32: VCLT. U16 Qd, Qn, Qm A64: CMHI Vd.8H, Vn.8H, Vm.8H |
CompareLessThan(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vcltq_u32 (uint32x4_t a, uint32x4_t b) A32: VCLT. U32 Qd, Qn, Qm A64: CMHI Vd.4S, Vn.4S, Vm.4S |
CompareLessThan(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vclt_u8 (uint8x8_t a, uint8x8_t b) A32: VCLT. U8 Dd, Dn, Dm A64: CMHI Vd.8B, Vn.8B, Vm.8B |
CompareLessThan(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vclt_s16 (int16x4_t a, int16x4_t b) A32: VCLT.S16 Dd, Dn, Dm A64: CMGT Vd.4H, Vn.4H, Vm.4H |
CompareLessThan(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vclt_s32 (int32x2_t a, int32x2_t b) A32: VCLT. S32 Dd, Dn, Dm A64: CMGT Vd.2S, Vn.2S, Vm.2S |
CompareLessThan(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vclt_s8 (int8x8_t a, int8x8_t b) A32: VCLT. S8 Dd, Dn, Dm A64: CMGT Vd.8B, Vn.8B, Vm.8B |
CompareLessThan(Vector64<Single>, Vector64<Single>) |
uint32x2_t vclt_f32 (float32x2_t a, float32x2_t b) A32: VCLT. F32 Dd, Dn, Dm A64: FCMGT Vd.2S, Vn.2S, Vm.2S |
CompareLessThan(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vclt_u16 (uint16x4_t a, uint16x4_t b) A32: VCLT. U16 Dd, Dn, Dm A64: CMHI Vd.4H, Vn.4H, Vm.4H |
CompareLessThan(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vclt_u32 (uint32x2_t a, uint32x2_t b) A32: VCLT. U32 Dd, Dn, Dm A64: CMHI Vd.2S, Vn.2S, Vm.2S |
CompareLessThanOrEqual(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vcleq_u8 (uint8x16_t a, uint8x16_t b) A32: VCLE. U8 Qd, Qn, Qm A64: CMHS Vd.16B, Vn.16B, Vm.16B |
CompareLessThanOrEqual(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vcleq_s16 (int16x8_t a, int16x8_t b) A32: VCLE. S16 Qd, Qn, Qm A64: CMGE Vd.8H, Vn.8H, Vm.8H |
CompareLessThanOrEqual(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vcleq_s32 (int32x4_t a, int32x4_t b) A32: VCLE. S32 Qd, Qn, Qm A64: CMGE Vd.4S, Vn.4S, Vm.4S |
CompareLessThanOrEqual(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vcleq_s8 (int8x16_t a, int8x16_t b) A32: VCLE. S8 Qd, Qn, Qm A64: CMGE Vd.16B, Vn.16B, Vm.16B |
CompareLessThanOrEqual(Vector128<Single>, Vector128<Single>) |
uint32x4_t vcleq_f32 (float32x4_t a, float32x4_t b) A32: VCLE. F32 Qd, Qn, Qm A64: FCMGE Vd.4S, Vn.4S, Vm.4S |
CompareLessThanOrEqual(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vcleq_u16 (uint16x8_t a, uint16x8_t b) A32: VCLE. U16 Qd, Qn, Qm A64: CMHS Vd.8H, Vn.8H, Vm.8H |
CompareLessThanOrEqual(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vcleq_u32 (uint32x4_t a, uint32x4_t b) A32: VCLE. U32 Qd, Qn, Qm A64: CMHS Vd.4S, Vn.4S, Vm.4S |
CompareLessThanOrEqual(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vcle_u8 (uint8x8_t a, uint8x8_t b) A32: VCLE.U8 Dd, Dn, Dm A64: CMHS Vd.8B, Vn.8B, Vm.8B |
CompareLessThanOrEqual(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vcle_s16 (int16x4_t a, int16x4_t b) A32: VCLE. S16 Dd, Dn, Dm A64: CMGE Vd.4H, Vn.4H, Vm.4H |
CompareLessThanOrEqual(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vcle_s32 (int32x2_t a, int32x2_t b) A32: VCLE. S32 Dd, Dn, Dm A64: CMGE Vd.2S, Vn.2S, Vm.2S |
CompareLessThanOrEqual(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vcle_s8 (int8x8_t a, int8x8_t b) A32: VCLE. S8 Dd, Dn, Dm A64: CMGE Vd.8B, Vn.8B, Vm.8B |
CompareLessThanOrEqual(Vector64<Single>, Vector64<Single>) |
uint32x2_t vcle_f32 (float32x2_t a, float32x2_t b) A32: VCLE. F32 Dd, Dn, Dm A64: FCMGE Vd.2S, Vn.2S, Vm.2S |
CompareLessThanOrEqual(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vcle_u16 (uint16x4_t a, uint16x4_t b) A32: VCLE. U16 Dd, Dn, Dm A64: CMHS Vd.4H, Vn.4H, Vm.4H |
CompareLessThanOrEqual(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vcle_u32 (uint32x2_t a, uint32x2_t b) A32: VCLE. U32 Dd, Dn, Dm A64: CMHS Vd.2S, Vn.2S, Vm.2S |
CompareTest(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vtstq_u8 (uint8x16_t a, uint8x16_t b) A32: VTST.8 Qd, Qn, Qm A64: CMTST Vd.16B, Vn.16B, Vm.16B |
CompareTest(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vtstq_s16 (int16x8_t a, int16x8_t b) A32: VTST.16 Qd, Qn, Qm A64: CMTST Vd.8H, Vn.8H, Vm.8H |
CompareTest(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vtstq_s32 (int32x4_t a, int32x4_t b) A32: VTST.32 Qd, Qn, Qm A64: CMTST Vd.4S, Vn.4S, Vm.4S |
CompareTest(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vtstq_s8 (int8x16_t a, int8x16_t b) A32: VTST.8 Qd, Qn, Qm A64: CMTST Vd.16B, Vn.16B, Vm.16B |
CompareTest(Vector128<Single>, Vector128<Single>) |
uint32x4_t vtstq_f32 (float32x4_t a, float32x4_t b) A32: VTST.32 Qd, Qn, Qm A64: CMTST Vd.4S, Vn.4S, Vm.4S The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
CompareTest(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vtstq_u16 (uint16x8_t a, uint16x8_t b) A32: VTST.16 Qd, Qn, Qm A64: CMTST Vd.8H, Vn.8H, Vm.8H |
CompareTest(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vtstq_u32 (uint32x4_t a, uint32x4_t b) A32: VTST.32 Qd, Qn, Qm A64: CMTST Vd.4S, Vn.4S, Vm.4S |
CompareTest(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vtst_u8 (uint8x8_t a, uint8x8_t b) A32: VTST.8 Dd, Dn, Dm A64: CMTST Vd.8B, Vn.8B, Vm.8B |
CompareTest(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vtst_s16 (int16x4_t a, int16x4_t b) A32: VTST.16 Dd, Dn, Dm A64: CMTST Vd.4H, Vn.4H, Vm.4H |
CompareTest(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vtst_s32 (int32x2_t a, int32x2_t b) A32: VTST.32 Dd, Dn, Dm A64: CMTST Vd.2S, Vn.2S, Vm.2S |
CompareTest(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vtst_s8 (int8x8_t a, int8x8_t b) A32: VTST.8 Dd, Dn, Dm A64: CMTST Vd.8B, Vn.8B, Vm.8B |
CompareTest(Vector64<Single>, Vector64<Single>) |
uint32x2_t vtst_f32 (float32x2_t a, float32x2_t b) A32: VTST.32 Dd, Dn, Dm A64: CMTST Vd.2S, Vn.2S, Vm.2S The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
CompareTest(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vtst_u16 (uint16x4_t a, uint16x4_t b) A32: VTST.16 Dd, Dn, Dm A64: CMTST Vd.4H, Vn.4H, Vm.4H |
CompareTest(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vtst_u32 (uint32x2_t a, uint32x2_t b) A32: VTST.32 Dd, Dn, Dm A64: CMTST Vd.2S, Vn.2S, Vm.2S |
ConvertToInt32RoundAwayFromZero(Vector128<Single>) |
int32x4_t vcvtaq_s32_f32 (float32x4_t a) A32: VCVTA. S32. F32 Qd, Qm A64: FCVTAS Vd.4S, Vn.4S |
ConvertToInt32RoundAwayFromZero(Vector64<Single>) |
int32x2_t vcvta_s32_f32 (float32x2_t a) A32: VCVTA. S32. F32 Dd, Dm A64: FCVTAS Vd.2S, Vn.2S |
ConvertToInt32RoundAwayFromZeroScalar(Vector64<Single>) |
int32_t vcvtas_s32_f32 (float32_t a) A32: VCVTA. S32. F32 Sd, Sm A64: FCVTAS Sd, Sn |
ConvertToInt32RoundToEven(Vector128<Single>) |
int32x4_t vcvtnq_s32_f32 (float32x4_t a) A32: VCVTN.S32.F32 Qd, Qm A64: FCVTNS Vd.4S, Vn.4S |
ConvertToInt32RoundToEven(Vector64<Single>) |
int32x2_t vcvtn_s32_f32 (float32x2_t a) A32: VCVTN. S32. F32 Dd, Dm A64: FCVTNS Vd.2S, Vn.2S |
ConvertToInt32RoundToEvenScalar(Vector64<Single>) |
int32_t vcvtns_s32_f32 (float32_t a) A32: VCVTN. S32. F32 Sd, Sm A64: FCVTNS Sd, Sn |
ConvertToInt32RoundToNegativeInfinity(Vector128<Single>) |
int32x4_t vcvtmq_s32_f32 (float32x4_t a) A32: VCVTM. S32. F32 Qd, Qm A64: FCVTMS Vd.4S, Vn.4S |
ConvertToInt32RoundToNegativeInfinity(Vector64<Single>) |
int32x2_t vcvtm_s32_f32 (float32x2_t a) A32: VCVTM. S32. F32 Dd, Dm A64: FCVTMS Vd.2S, Vn.2S |
ConvertToInt32RoundToNegativeInfinityScalar(Vector64<Single>) |
int32_t vcvtms_s32_f32 (float32_t a) A32: VCVTM. S32. F32 Sd, Sm A64: FCVTMS Sd, Sn |
ConvertToInt32RoundToPositiveInfinity(Vector128<Single>) |
int32x4_t vcvtpq_s32_f32 (float32x4_t a) A32: VCVTP. S32. F32 Qd, Qm A64: FCVTPS Vd.4S, Vn.4S |
ConvertToInt32RoundToPositiveInfinity(Vector64<Single>) |
int32x2_t vcvtp_s32_f32 (float32x2_t a) A32: VCVTP. S32. F32 Dd, Dm A64: FCVTPS Vd.2S, Vn.2S |
ConvertToInt32RoundToPositiveInfinityScalar(Vector64<Single>) |
int32_t vcvtps_s32_f32 (float32_t a) A32: VCVTP. S32. F32 Sd, Sm A64: FCVTPS Sd, Sn |
ConvertToInt32RoundToZero(Vector128<Single>) |
int32x4_t vcvtq_s32_f32 (float32x4_t a) A32: VCVT. S32. F32 Qd, Qm A64: FCVTZS Vd.4S, Vn.4S |
ConvertToInt32RoundToZero(Vector64<Single>) |
int32x2_t vcvt_s32_f32 (float32x2_t a) A32: VCVT. S32. F32 Dd, Dm A64: FCVTZS Vd.2S, Vn.2S |
ConvertToInt32RoundToZeroScalar(Vector64<Single>) |
int32_t vcvts_s32_f32 (float32_t a) A32: VCVT. S32. F32 Sd, Sm A64: FCVTZS Sd, Sn |
ConvertToSingle(Vector128<Int32>) |
float32x4_t vcvtq_f32_s32 (int32x4_t a) A32: VCVT. F32. S32 Qd, Qm A64: SCVTF Vd.4S, Vn.4S |
ConvertToSingle(Vector128<UInt32>) |
float32x4_t vcvtq_f32_u32 (uint32x4_t a) A32: VCVT. F32. U32 Qd, Qm A64: UCVTF Vd.4S, Vn.4S |
ConvertToSingle(Vector64<Int32>) |
float32x2_t vcvt_f32_s32 (int32x2_t a) A32: VCVT. F32. S32 Dd, Dm A64: SCVTF Vd.2S, Vn.2S |
ConvertToSingle(Vector64<UInt32>) |
float32x2_t vcvt_f32_u32 (uint32x2_t a) A32: VCVT. F32. U32 Dd, Dm A64: UCVTF Vd.2S, Vn.2S |
ConvertToSingleScalar(Vector64<Int32>) |
float32_t vcvts_f32_s32 (int32_t a) A32: VCVT. F32. S32 Sd, Sm A64: SCVTF Sd, Sn |
ConvertToSingleScalar(Vector64<UInt32>) |
float32_t vcvts_f32_u32 (uint32_t a) A32: VCVT. F32. U32 Sd, Sm A64: UCVTF Sd, Sn |
ConvertToUInt32RoundAwayFromZero(Vector128<Single>) |
uint32x4_t vcvtaq_u32_f32 (float32x4_t a) A32: VCVTA. U32. F32 Qd, Qm A64: FCVTAU Vd.4S, Vn.4S |
ConvertToUInt32RoundAwayFromZero(Vector64<Single>) |
uint32x2_t vcvta_u32_f32 (float32x2_t a) A32: VCVTA. U32. F32 Dd, Dm A64: FCVTAU Vd.2S, Vn.2S |
ConvertToUInt32RoundAwayFromZeroScalar(Vector64<Single>) |
uint32_t vcvtas_u32_f32 (float32_t a) A32: VCVTA. U32. F32 Sd, Sm A64: FCVTAU Sd, Sn |
ConvertToUInt32RoundToEven(Vector128<Single>) |
uint32x4_t vcvtnq_u32_f32 (float32x4_t a) A32: VCVTN. U32. F32 Qd, Qm A64: FCVTNU Vd.4S, Vn.4S |
ConvertToUInt32RoundToEven(Vector64<Single>) |
uint32x2_t vcvtn_u32_f32 (float32x2_t a) A32: VCVTN. U32. F32 Dd, Dm A64: FCVTNU Vd.2S, Vn.2S |
ConvertToUInt32RoundToEvenScalar(Vector64<Single>) |
uint32_t vcvtns_u32_f32 (float32_t a) A32: VCVTN. U32. F32 Sd, Sm A64: FCVTNU Sd, Sn |
ConvertToUInt32RoundToNegativeInfinity(Vector128<Single>) |
uint32x4_t vcvtmq_u32_f32 (float32x4_t a) A32: VCVTM. U32. F32 Qd, Qm A64: FCVTMU Vd.4S, Vn.4S |
ConvertToUInt32RoundToNegativeInfinity(Vector64<Single>) |
uint32x2_t vcvtm_u32_f32 (float32x2_t a) A32: VCVTM. U32. F32 Dd, Dm A64: FCVTMU Vd.2S, Vn.2S |
ConvertToUInt32RoundToNegativeInfinityScalar(Vector64<Single>) |
uint32_t vcvtms_u32_f32 (float32_t a) A32: VCVTM. U32. F32 Sd, Sm A64: FCVTMU Sd, Sn |
ConvertToUInt32RoundToPositiveInfinity(Vector128<Single>) |
uint32x4_t vcvtpq_u32_f32 (float32x4_t a) A32: VCVTP. U32. F32 Qd, Qm A64: FCVTPU Vd.4S, Vn.4S |
ConvertToUInt32RoundToPositiveInfinity(Vector64<Single>) |
uint32x2_t vcvtp_u32_f32 (float32x2_t a) A32: VCVTP. U32. F32 Dd, Dm A64: FCVTPU Vd.2S, Vn.2S |
ConvertToUInt32RoundToPositiveInfinityScalar(Vector64<Single>) |
uint32_t vcvtps_u32_f32 (float32_t a) A32: VCVTP. U32. F32 Sd, Sm A64: FCVTPU Sd, Sn |
ConvertToUInt32RoundToZero(Vector128<Single>) |
uint32x4_t vcvtq_u32_f32 (float32x4_t a) A32: VCVT. U32. F32 Qd, Qm A64: FCVTZU Vd.4S, Vn.4S |
ConvertToUInt32RoundToZero(Vector64<Single>) |
uint32x2_t vcvt_u32_f32 (float32x2_t a) A32: VCVT. U32. F32 Dd, Dm A64: FCVTZU Vd.2S, Vn.2S |
ConvertToUInt32RoundToZeroScalar(Vector64<Single>) |
uint32_t vcvts_u32_f32 (float32_t a) A32: VCVT. U32. F32 Sd, Sm A64: FCVTZU Sd, Sn |
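The ConvertTo* overloads above differ only in the rounding mode applied before the float-to-integer conversion. A short sketch comparing three modes on the same input (the expected lane values in the comments are for illustration):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<float> input = Vector128.Create(1.5f, 2.5f, -1.5f, -2.5f);

    Vector128<int> toEven = AdvSimd.ConvertToInt32RoundToEven(input);             // 2, 2, -2, -2
    Vector128<int> awayFromZero = AdvSimd.ConvertToInt32RoundAwayFromZero(input); // 2, 3, -2, -3
    Vector128<int> toZero = AdvSimd.ConvertToInt32RoundToZero(input);             // 1, 2, -1, -2
}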
DivideScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vdiv_f64 (float64x1_t a, float64x1_t b) A32: VDIV.F64 Dd, Dn, Dm A64: FDIV Dd, Dn, Dm |
DivideScalar(Vector64<Single>, Vector64<Single>) |
float32_t vdivs_f32 (float32_t a, float32_t b) A32: VDIV.F32 Sd, Sn, Sm A64: FDIV Sd, Sn, Sm The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
DuplicateSelectedScalarToVector128(Vector128<Byte>, Byte) |
uint8x16_t vdupq_lane_u8 (uint8x16_t vec, const int lane) A32: VDUP.8 Qd, Dm[index] A64: DUP Vd.16B, Vn.B[index] |
DuplicateSelectedScalarToVector128(Vector128<Int16>, Byte) |
int16x8_t vdupq_lane_s16 (int16x8_t vec, const int lane) A32: VDUP.16 Qd, Dm[index] A64: DUP Vd.8H, Vn.H[index] |
DuplicateSelectedScalarToVector128(Vector128<Int32>, Byte) |
int32x4_t vdupq_lane_s32 (int32x4_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector128(Vector128<SByte>, Byte) |
int8x16_t vdupq_lane_s8 (int8x16_t vec, const int lane) A32: VDUP.8 Qd, Dm[index] A64: DUP Vd.16B, Vn.B[index] |
DuplicateSelectedScalarToVector128(Vector128<Single>, Byte) |
float32x4_t vdupq_lane_f32 (float32x4_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector128(Vector128<UInt16>, Byte) |
uint16x8_t vdupq_lane_u16 (uint16x8_t vec, const int lane) A32: VDUP.16 Qd, Dm[index] A64: DUP Vd.8H, Vn.H[index] |
DuplicateSelectedScalarToVector128(Vector128<UInt32>, Byte) |
uint32x4_t vdupq_lane_u32 (uint32x4_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector128(Vector64<Byte>, Byte) |
uint8x16_t vdupq_lane_u8 (uint8x8_t vec, const int lane) A32: VDUP.8 Qd, Dm[index] A64: DUP Vd.16B, Vn.B[index] |
DuplicateSelectedScalarToVector128(Vector64<Int16>, Byte) |
int16x8_t vdupq_lane_s16 (int16x4_t vec, const int lane) A32: VDUP.16 Qd, Dm[index] A64: DUP Vd.8H, Vn.H[index] |
DuplicateSelectedScalarToVector128(Vector64<Int32>, Byte) |
int32x4_t vdupq_lane_s32 (int32x2_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector128(Vector64<SByte>, Byte) |
int8x16_t vdupq_lane_s8 (int8x8_t vec, const int lane) A32: VDUP.8 Qd, Dm[index] A64: DUP Vd.16B, Vn.B[index] |
DuplicateSelectedScalarToVector128(Vector64<Single>, Byte) |
float32x4_t vdupq_lane_f32 (float32x2_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector128(Vector64<UInt16>, Byte) |
uint16x8_t vdupq_lane_u16 (uint16x4_t vec, const int lane) A32: VDUP.16 Qd, Dm[index] A64: DUP Vd.8H, Vn.H[index] |
DuplicateSelectedScalarToVector128(Vector64<UInt32>, Byte) |
uint32x4_t vdupq_lane_u32 (uint32x2_t vec, const int lane) A32: VDUP.32 Qd, Dm[index] A64: DUP Vd.4S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector128<Byte>, Byte) |
uint8x8_t vdup_laneq_u8 (uint8x16_t vec, const int lane) A32: VDUP.8 Dd, Dm[index] A64: DUP Vd.8B, Vn.B[index] |
DuplicateSelectedScalarToVector64(Vector128<Int16>, Byte) |
int16x4_t vdup_laneq_s16 (int16x8_t vec, const int lane) A32: VDUP.16 Dd, Dm[index] A64: DUP Vd.4H, Vn.H[index] |
DuplicateSelectedScalarToVector64(Vector128<Int32>, Byte) |
int32x2_t vdup_laneq_s32 (int32x4_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector128<SByte>, Byte) |
int8x8_t vdup_laneq_s8 (int8x16_t vec, const int lane) A32: VDUP.8 Dd, Dm[index] A64: DUP Vd.8B, Vn.B[index] |
DuplicateSelectedScalarToVector64(Vector128<Single>, Byte) |
float32x2_t vdup_laneq_f32 (float32x4_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector128<UInt16>, Byte) |
uint16x4_t vdup_laneq_u16 (uint16x8_t vec, const int lane) A32: VDUP.16 Dd, Dm[index] A64: DUP Vd.4H, Vn.H[index] |
DuplicateSelectedScalarToVector64(Vector128<UInt32>, Byte) |
uint32x2_t vdup_laneq_u32 (uint32x4_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector64<Byte>, Byte) |
uint8x8_t vdup_lane_u8 (uint8x8_t vec, const int lane) A32: VDUP.8 Dd, Dm[index] A64: DUP Vd.8B, Vn.B[index] |
DuplicateSelectedScalarToVector64(Vector64<Int16>, Byte) |
int16x4_t vdup_lane_s16 (int16x4_t vec, const int lane) A32: VDUP.16 Dd, Dm[index] A64: DUP Vd.4H, Vn.H[index] |
DuplicateSelectedScalarToVector64(Vector64<Int32>, Byte) |
int32x2_t vdup_lane_s32 (int32x2_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector64<SByte>, Byte) |
int8x8_t vdup_lane_s8 (int8x8_t vec, const int lane) A32: VDUP.8 Dd, Dm[index] A64: DUP Vd.8B, Vn.B[index] |
DuplicateSelectedScalarToVector64(Vector64<Single>, Byte) |
float32x2_t vdup_lane_f32 (float32x2_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateSelectedScalarToVector64(Vector64<UInt16>, Byte) |
uint16x4_t vdup_lane_u16 (uint16x4_t vec, const int lane) A32: VDUP.16 Dd, Dm[index] A64: DUP Vd.4H, Vn.H[index] |
DuplicateSelectedScalarToVector64(Vector64<UInt32>, Byte) |
uint32x2_t vdup_lane_u32 (uint32x2_t vec, const int lane) A32: VDUP.32 Dd, Dm[index] A64: DUP Vd.2S, Vn.S[index] |
DuplicateToVector128(Byte) |
uint8x16_t vdupq_n_u8 (uint8_t value) A32: VDUP.8 Qd, Rt A64: DUP Vd.16B, Rn |
DuplicateToVector128(Int16) |
int16x8_t vdupq_n_s16 (int16_t value) A32: VDUP.16 Qd, Rt A64: DUP Vd.8H, Rn |
DuplicateToVector128(Int32) |
int32x4_t vdupq_n_s32 (int32_t value) A32: VDUP.32 Qd, Rt A64: DUP Vd.4S, Rn |
DuplicateToVector128(SByte) |
int8x16_t vdupq_n_s8 (int8_t value) A32: VDUP.8 Qd, Rt A64: DUP Vd.16B, Rn |
DuplicateToVector128(Single) |
float32x4_t vdupq_n_f32 (float32_t value) A32: VDUP Qd, Dm[0] A64: DUP Vd.4S, Vn.S[0] |
DuplicateToVector128(UInt16) |
uint16x8_t vdupq_n_u16 (uint16_t value) A32: VDUP.16 Qd, Rt A64: DUP Vd.8H, Rn |
DuplicateToVector128(UInt32) |
uint32x4_t vdupq_n_u32 (uint32_t value) A32: VDUP.32 Qd, Rt A64: DUP Vd.4S, Rn |
DuplicateToVector64(Byte) |
uint8x8_t vdup_n_u8 (uint8_t value) A32: VDUP.8 Dd, Rt A64: DUP Vd.8B, Rn |
DuplicateToVector64(Int16) |
int16x4_t vdup_n_s16 (int16_t value) A32: VDUP.16 Dd, Rt A64: DUP Vd.4H, Rn |
DuplicateToVector64(Int32) |
int32x2_t vdup_n_s32 (int32_t value) A32: VDUP.32 Dd, Rt A64: DUP Vd.2S, Rn |
DuplicateToVector64(SByte) |
int8x8_t vdup_n_s8 (int8_t value) A32: VDUP.8 Dd, Rt A64: DUP Vd.8B, Rn |
DuplicateToVector64(Single) |
float32x2_t vdup_n_f32 (float32_t value) A32: VDUP Dd, Dm[0] A64: DUP Vd.2S, Vn.S[0] |
DuplicateToVector64(UInt16) |
uint16x4_t vdup_n_u16 (uint16_t value) A32: VDUP.16 Dd, Rt A64: DUP Vd.4H, Rn |
DuplicateToVector64(UInt32) |
uint32x2_t vdup_n_u32 (uint32_t value) A32: VDUP.32 Dd, Rt A64: DUP Vd.2S, Rn |
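Both duplicate families broadcast a value into every lane: DuplicateToVector64/128 take a scalar, while DuplicateSelectedScalarToVector64/128 take a lane of an existing vector. A brief sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    // Broadcast a scalar into all eight 16-bit lanes (VDUP/DUP from a general-purpose register).
    Vector128<short> allFives = AdvSimd.DuplicateToVector128((short)5);

    // Broadcast lane 2 of an existing vector into all four lanes of a new vector.
    Vector128<int> source = Vector128.Create(10, 20, 30, 40);
    Vector128<int> lane2Everywhere = AdvSimd.DuplicateSelectedScalarToVector128(source, 2);
}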
Equals(Object) |
Determines whether the specified object is equal to the current object. (Inherited from Object) |
Extract(Vector128<Byte>, Byte) |
uint8_t vgetq_lane_u8 (uint8x16_t v, const int lane) A32: VMOV. U8 Rt, Dn[lane] A64: UMOV Wd, Vn.B[lane] |
Extract(Vector128<Double>, Byte) |
float64_t vgetq_lane_f64 (float64x2_t v, const int lane) A32: VMOV. F64 Dd, Dm A64: DUP Dd, Vn.D[lane] |
Extract(Vector128<Int16>, Byte) |
int16_t vgetq_lane_s16 (int16x8_t v, const int lane) A32: VMOV. S16 Rt, Dn[lane] A64: SMOV Wd, Vn.H[lane] |
Extract(Vector128<Int32>, Byte) |
int32_t vgetq_lane_s32 (int32x4_t v, const int lane) A32: VMOV.32 Rt, Dn[lane] A64: SMOV Wd, Vn.S[lane] |
Extract(Vector128<Int64>, Byte) |
int64_t vgetq_lane_s64 (int64x2_t v, const int lane) A32: VMOV Rt, Rt2, Dm A64: UMOV Xd, Vn.D[lane] |
Extract(Vector128<SByte>, Byte) |
int8_t vgetq_lane_s8 (int8x16_t v, const int lane) A32: VMOV. S8 Rt, Dn[lane] A64: SMOV Wd, Vn.B[lane] |
Extract(Vector128<Single>, Byte) |
float32_t vgetq_lane_f32 (float32x4_t v, const int lane) A32: VMOV. F32 Sd, Sm A64: DUP Sd, Vn.S[lane] |
Extract(Vector128<UInt16>, Byte) |
uint16_t vgetq_lane_u16 (uint16x8_t v, const int lane) A32: VMOV. U16 Rt, Dn[lane] A64: UMOV Wd, Vn.H[lane] |
Extract(Vector128<UInt32>, Byte) |
uint32_t vgetq_lane_u32 (uint32x4_t v, const int lane) A32: VMOV.32 Rt, Dn[lane] A64: UMOV Wd, Vn.S[lane] |
Extract(Vector128<UInt64>, Byte) |
uint64_t vgetq_lane_u64 (uint64x2_t v, const int lane) A32: VMOV Rt, Rt2, Dm A64: UMOV Xd, Vn.D[lane] |
Extract(Vector64<Byte>, Byte) |
uint8_t vget_lane_u8 (uint8x8_t v, const int lane) A32: VMOV. U8 Rt, Dn[lane] A64: UMOV Wd, Vn.B[lane] |
Extract(Vector64<Int16>, Byte) |
int16_t vget_lane_s16 (int16x4_t v, const int lane) A32: VMOV. S16 Rt, Dn[lane] A64: SMOV Wd, Vn.H[lane] |
Extract(Vector64<Int32>, Byte) |
int32_t vget_lane_s32 (int32x2_t v, const int lane) A32: VMOV.32 Rt, Dn[lane] A64: SMOV Wd, Vn.S[lane] |
Extract(Vector64<SByte>, Byte) |
int8_t vget_lane_s8 (int8x8_t v, const int lane) A32: VMOV. S8 Rt, Dn[lane] A64: SMOV Wd, Vn.B[lane] |
Extract(Vector64<Single>, Byte) |
float32_t vget_lane_f32 (float32x2_t v, const int lane) A32: VMOV. F32 Sd, Sm A64: DUP Sd, Vn.S[lane] |
Extract(Vector64<UInt16>, Byte) |
uint16_t vget_lane_u16 (uint16x4_t v, const int lane) A32: VMOV. U16 Rt, Dn[lane] A64: UMOV Wd, Vn.H[lane] |
Extract(Vector64<UInt32>, Byte) |
uint32_t vget_lane_u32 (uint32x2_t v, const int lane) A32: VMOV.32 Rt, Dn[lane] A64: UMOV Wd, Vn.S[lane] |
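Extract moves a single lane into a scalar; the lane index must be within range for the element type. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<ushort> v = Vector128.Create((ushort)1, 2, 3, 4, 5, 6, 7, 8);

    // Read lane 3 back into a general-purpose register (UMOV): yields 4.
    ushort lane3 = AdvSimd.Extract(v, 3);
}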
ExtractNarrowingLower(Vector128<Int16>) |
int8x8_t vmovn_s16 (int16x8_t a) A32: VMOVN. I16 Dd, Qm A64: XTN Vd.8B, Vn.8H |
ExtractNarrowingLower(Vector128<Int32>) |
int16x4_t vmovn_s32 (int32x4_t a) A32: VMOVN. I32 Dd, Qm A64: XTN Vd.4H, Vn.4S |
ExtractNarrowingLower(Vector128<Int64>) |
int32x2_t vmovn_s64 (int64x2_t a) A32: VMOVN. I64 Dd, Qm A64: XTN Vd.2S, Vn.2D |
ExtractNarrowingLower(Vector128<UInt16>) |
uint8x8_t vmovn_u16 (uint16x8_t a) A32: VMOVN.I16 Dd, Qm A64: XTN Vd.8B, Vn.8H |
ExtractNarrowingLower(Vector128<UInt32>) |
uint16x4_t vmovn_u32 (uint32x4_t a) A32: VMOVN. I32 Dd, Qm A64: XTN Vd.4H, Vn.4S |
ExtractNarrowingLower(Vector128<UInt64>) |
uint32x2_t vmovn_u64 (uint64x2_t a) A32: VMOVN. I64 Dd, Qm A64: XTN Vd.2S, Vn.2D |
ExtractNarrowingSaturateLower(Vector128<Int16>) |
int8x8_t vqmovn_s16 (int16x8_t a) A32: VQMOVN. S16 Dd, Qm A64: SQXTN Vd.8B, Vn.8H |
ExtractNarrowingSaturateLower(Vector128<Int32>) |
int16x4_t vqmovn_s32 (int32x4_t a) A32: VQMOVN. S32 Dd, Qm A64: SQXTN Vd.4H, Vn.4S |
ExtractNarrowingSaturateLower(Vector128<Int64>) |
int32x2_t vqmovn_s64 (int64x2_t a) A32: VQMOVN. S64 Dd, Qm A64: SQXTN Vd.2S, Vn.2D |
ExtractNarrowingSaturateLower(Vector128<UInt16>) |
uint8x8_t vqmovn_u16 (uint16x8_t a) A32: VQMOVN. U16 Dd, Qm A64: UQXTN Vd.8B, Vn.8H |
ExtractNarrowingSaturateLower(Vector128<UInt32>) |
uint16x4_t vqmovn_u32 (uint32x4_t a) A32: VQMOVN. U32 Dd, Qm A64: UQXTN Vd.4H, Vn.4S |
ExtractNarrowingSaturateLower(Vector128<UInt64>) |
uint32x2_t vqmovn_u64 (uint64x2_t a) A32: VQMOVN. U64 Dd, Qm A64: UQXTN Vd.2S, Vn.2D |
ExtractNarrowingSaturateUnsignedLower(Vector128<Int16>) |
uint8x8_t vqmovun_s16 (int16x8_t a) A32: VQMOVUN. S16 Dd, Qm A64: SQXTUN Vd.8B, Vn.8H |
ExtractNarrowingSaturateUnsignedLower(Vector128<Int32>) |
uint16x4_t vqmovun_s32 (int32x4_t a) A32: VQMOVUN. S32 Dd, Qm A64: SQXTUN Vd.4H, Vn.4S |
ExtractNarrowingSaturateUnsignedLower(Vector128<Int64>) |
uint32x2_t vqmovun_s64 (int64x2_t a) A32: VQMOVUN. S64 Dd, Qm A64: SQXTUN Vd.2S, Vn.2D |
ExtractNarrowingSaturateUnsignedUpper(Vector64<Byte>, Vector128<Int16>) |
uint8x16_t vqmovun_high_s16 (uint8x8_t r, int16x8_t a) A32: VQMOVUN. S16 Dd+1, Qm A64: SQXTUN2 Vd.16B, Vn.8H |
ExtractNarrowingSaturateUnsignedUpper(Vector64<UInt16>, Vector128<Int32>) |
uint16x8_t vqmovun_high_s32 (uint16x4_t r, int32x4_t a) A32: VQMOVUN. S32 Dd+1, Qm A64: SQXTUN2 Vd.8H, Vn.4S |
ExtractNarrowingSaturateUnsignedUpper(Vector64<UInt32>, Vector128<Int64>) |
uint32x4_t vqmovun_high_s64 (uint32x2_t r, int64x2_t a) A32: VQMOVUN. S64 Dd+1, Qm A64: SQXTUN2 Vd.4S, Vn.2D |
ExtractNarrowingSaturateUpper(Vector64<Byte>, Vector128<UInt16>) |
uint8x16_t vqmovn_high_u16 (uint8x8_t r, uint16x8_t a) A32: VQMOVN. U16 Dd+1, Qm A64: UQXTN2 Vd.16B, Vn.8H |
ExtractNarrowingSaturateUpper(Vector64<Int16>, Vector128<Int32>) |
int16x8_t vqmovn_high_s32 (int16x4_t r, int32x4_t a) A32: VQMOVN. S32 Dd+1, Qm A64: SQXTN2 Vd.8H, Vn.4S |
ExtractNarrowingSaturateUpper(Vector64<Int32>, Vector128<Int64>) |
int32x4_t vqmovn_high_s64 (int32x2_t r, int64x2_t a) A32: VQMOVN. S64 Dd+1, Qm A64: SQXTN2 Vd.4S, Vn.2D |
ExtractNarrowingSaturateUpper(Vector64<SByte>, Vector128<Int16>) |
int8x16_t vqmovn_high_s16 (int8x8_t r, int16x8_t a) A32: VQMOVN. S16 Dd+1, Qm A64: SQXTN2 Vd.16B, Vn.8H |
ExtractNarrowingSaturateUpper(Vector64<UInt16>, Vector128<UInt32>) |
uint16x8_t vqmovn_high_u32 (uint16x4_t r, uint32x4_t a) A32: VQMOVN. U32 Dd+1, Qm A64: UQXTN2 Vd.8H, Vn.4S |
ExtractNarrowingSaturateUpper(Vector64<UInt32>, Vector128<UInt64>) |
uint32x4_t vqmovn_high_u64 (uint32x2_t r, uint64x2_t a) A32: VQMOVN. U64 Dd+1, Qm A64: UQXTN2 Vd.4S, Vn.2D |
ExtractNarrowingUpper(Vector64<Byte>, Vector128<UInt16>) |
uint8x16_t vmovn_high_u16 (uint8x8_t r, uint16x8_t a) A32: VMOVN. I16 Dd+1, Qm A64: XTN2 Vd.16B, Vn.8H |
ExtractNarrowingUpper(Vector64<Int16>, Vector128<Int32>) |
int16x8_t vmovn_high_s32 (int16x4_t r, int32x4_t a) A32: VMOVN. I32 Dd+1, Qm A64: XTN2 Vd.8H, Vn.4S |
ExtractNarrowingUpper(Vector64<Int32>, Vector128<Int64>) |
int32x4_t vmovn_high_s64 (int32x2_t r, int64x2_t a) A32: VMOVN. I64 Dd+1, Qm A64: XTN2 Vd.4S, Vn.2D |
ExtractNarrowingUpper(Vector64<SByte>, Vector128<Int16>) |
int8x16_t vmovn_high_s16 (int8x8_t r, int16x8_t a) A32: VMOVN. I16 Dd+1, Qm A64: XTN2 Vd.16B, Vn.8H |
ExtractNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>) |
uint16x8_t vmovn_high_u32 (uint16x4_t r, uint32x4_t a) A32: VMOVN. I32 Dd+1, Qm A64: XTN2 Vd.8H, Vn.4S |
ExtractNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>) |
uint32x4_t vmovn_high_u64 (uint32x2_t r, uint64x2_t a) A32: VMOVN. I64 Dd+1, Qm A64: XTN2 Vd.4S, Vn.2D |
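The Lower variants narrow a 128-bit vector into a 64-bit result, and the Upper variants append a second narrowed vector above an existing 64-bit lower half. Together they pack two wide vectors into one narrower vector, as in this sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<int> lo = Vector128.Create(1, 2, 3, 4);
    Vector128<int> hi = Vector128.Create(5, 6, 7, 8);

    // Narrow the first vector into a 64-bit result (XTN) ...
    Vector64<short> lower = AdvSimd.ExtractNarrowingLower(lo);

    // ... then narrow the second vector into the upper half (XTN2),
    // producing the eight 16-bit lanes 1..8.
    Vector128<short> packed = AdvSimd.ExtractNarrowingUpper(lower, hi);
}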
ExtractVector128(Vector128<Byte>, Vector128<Byte>, Byte) |
uint8x16_t vextq_s8 (uint8x16_t a, uint8x16_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #n A64: EXT Vd.16B, Vn.16B, Vm.16B, #n |
ExtractVector128(Vector128<Double>, Vector128<Double>, Byte) |
float64x2_t vextq_f64 (float64x2_t a, float64x2_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*8) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8) |
ExtractVector128(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vextq_s16 (int16x8_t a, int16x8_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*2) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2) |
ExtractVector128(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vextq_s32 (int32x4_t a, int32x4_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*4) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4) |
ExtractVector128(Vector128<Int64>, Vector128<Int64>, Byte) |
int64x2_t vextq_s64 (int64x2_t a, int64x2_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*8) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8) |
ExtractVector128(Vector128<SByte>, Vector128<SByte>, Byte) |
int8x16_t vextq_s8 (int8x16_t a, int8x16_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #n A64: EXT Vd.16B, Vn.16B, Vm.16B, #n |
ExtractVector128(Vector128<Single>, Vector128<Single>, Byte) |
float32x4_t vextq_f32 (float32x4_t a, float32x4_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*4) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4) |
ExtractVector128(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vextq_s16 (uint16x8_t a, uint16x8_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*2) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2) |
ExtractVector128(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vextq_s32 (uint32x4_t a, uint32x4_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*4) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4) |
ExtractVector128(Vector128<UInt64>, Vector128<UInt64>, Byte) |
uint64x2_t vextq_s64 (uint64x2_t a, uint64x2_t b, const int n) A32: VEXT.8 Qd, Qn, Qm, #(n*8) A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8) |
ExtractVector64(Vector64<Byte>, Vector64<Byte>, Byte) |
uint8x8_t vext_s8 (uint8x8_t a, uint8x8_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #n A64: EXT Vd.8B, Vn.8B, Vm.8B, #n |
ExtractVector64(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vext_s16 (int16x4_t a, int16x4_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #(n*2) A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2) |
ExtractVector64(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vext_s32 (int32x2_t a, int32x2_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #(n*4) A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4) |
ExtractVector64(Vector64<SByte>, Vector64<SByte>, Byte) |
int8x8_t vext_s8 (int8x8_t a, int8x8_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #n A64: EXT Vd.8B, Vn.8B, Vm.8B, #n |
ExtractVector64(Vector64<Single>, Vector64<Single>, Byte) |
float32x2_t vext_f32 (float32x2_t a, float32x2_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #(n*4) A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4) |
ExtractVector64(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vext_s16 (uint16x4_t a, uint16x4_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #(n*2) A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2) |
ExtractVector64(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vext_s32 (uint32x2_t a, uint32x2_t b, const int n) A32: VEXT.8 Dd, Dn, Dm, #(n*4) A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4) |
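ExtractVector64/128 correspond to the EXT instruction: conceptually the two operands are concatenated and a window starting n elements into the first operand is returned. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<byte> a = Vector128.Create((byte)0);   // all lanes 0
    Vector128<byte> b = Vector128.Create((byte)1);   // all lanes 1

    // Upper 13 bytes of a followed by the lower 3 bytes of b (EXT #3).
    Vector128<byte> window = AdvSimd.ExtractVector128(a, b, 3);
}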
Floor(Vector128<Single>) |
float32x4_t vrndmq_f32 (float32x4_t a) A32: VRINTM. F32 Qd, Qm A64: FRINTM Vd.4S, Vn.4S |
Floor(Vector64<Single>) |
float32x2_t vrndm_f32 (float32x2_t a) A32: VRINTM. F32 Dd, Dm A64: FRINTM Vd.2S, Vn.2S |
FloorScalar(Vector64<Double>) |
float64x1_t vrndm_f64 (float64x1_t a) A32: VRINTM. F64 Dd, Dm A64: FRINTM Dd, Dn |
FloorScalar(Vector64<Single>) |
float32_t vrndms_f32 (float32_t a) A32: VRINTM.F32 Sd, Sm A64: FRINTM Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedAddHalving(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vhaddq_u8 (uint8x16_t a, uint8x16_t b) A32: VHADD. U8 Qd, Qn, Qm A64: UHADD Vd.16B, Vn.16B, Vm.16B |
FusedAddHalving(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vhaddq_s16 (int16x8_t a, int16x8_t b) A32: VHADD. S16 Qd, Qn, Qm A64: SHADD Vd.8H, Vn.8H, Vm.8H |
FusedAddHalving(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vhaddq_s32 (int32x4_t a, int32x4_t b) A32: VHADD. S32 Qd, Qn, Qm A64: SHADD Vd.4S, Vn.4S, Vm.4S |
FusedAddHalving(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vhaddq_s8 (int8x16_t a, int8x16_t b) A32: VHADD. S8 Qd, Qn, Qm A64: SHADD Vd.16B, Vn.16B, Vm.16B |
FusedAddHalving(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vhaddq_u16 (uint16x8_t a, uint16x8_t b) A32: VHADD. U16 Qd, Qn, Qm A64: UHADD Vd.8H, Vn.8H, Vm.8H |
FusedAddHalving(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vhaddq_u32 (uint32x4_t a, uint32x4_t b) A32: VHADD. U32 Qd, Qn, Qm A64: UHADD Vd.4S, Vn.4S, Vm.4S |
FusedAddHalving(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vhadd_u8 (uint8x8_t a, uint8x8_t b) A32: VHADD. U8 Dd, Dn, Dm A64: UHADD Vd.8B, Vn.8B, Vm.8B |
FusedAddHalving(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vhadd_s16 (int16x4_t a, int16x4_t b) A32: VHADD. S16 Dd, Dn, Dm A64: SHADD Vd.4H, Vn.4H, Vm.4H |
FusedAddHalving(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vhadd_s32 (int32x2_t a, int32x2_t b) A32: VHADD. S32 Dd, Dn, Dm A64: SHADD Vd.2S, Vn.2S, Vm.2S |
FusedAddHalving(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vhadd_s8 (int8x8_t a, int8x8_t b) A32: VHADD. S8 Dd, Dn, Dm A64: SHADD Vd.8B, Vn.8B, Vm.8B |
FusedAddHalving(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vhadd_u16 (uint16x4_t a, uint16x4_t b) A32: VHADD. U16 Dd, Dn, Dm A64: UHADD Vd.4H, Vn.4H, Vm.4H |
FusedAddHalving(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vhadd_u32 (uint32x2_t a, uint32x2_t b) A32: VHADD. U32 Dd, Dn, Dm A64: UHADD Vd.2S, Vn.2S, Vm.2S |
FusedAddRoundedHalving(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vrhaddq_u8 (uint8x16_t a, uint8x16_t b) A32: VRHADD. U8 Qd, Qn, Qm A64: URHADD Vd.16B, Vn.16B, Vm.16B |
FusedAddRoundedHalving(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vrhaddq_s16 (int16x8_t a, int16x8_t b) A32: VRHADD. S16 Qd, Qn, Qm A64: SRHADD Vd.8H, Vn.8H, Vm.8H |
FusedAddRoundedHalving(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vrhaddq_s32 (int32x4_t a, int32x4_t b) A32: VRHADD. S32 Qd, Qn, Qm A64: SRHADD Vd.4S, Vn.4S, Vm.4S |
FusedAddRoundedHalving(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vrhaddq_s8 (int8x16_t a, int8x16_t b) A32: VRHADD.S8 Qd, Qn, Qm A64: SRHADD Vd.16B, Vn.16B, Vm.16B |
FusedAddRoundedHalving(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vrhaddq_u16 (uint16x8_t a, uint16x8_t b) A32: VRHADD.U16 Qd, Qn, Qm A64: URHADD Vd.8H, Vn.8H, Vm.8H |
FusedAddRoundedHalving(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vrhaddq_u32 (uint32x4_t a, uint32x4_t b) A32: VRHADD. U32 Qd, Qn, Qm A64: URHADD Vd.4S, Vn.4S, Vm.4S |
FusedAddRoundedHalving(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vrhadd_u8 (uint8x8_t a, uint8x8_t b) A32: VRHADD.U8 Dd, Dn, Dm A64: URHADD Vd.8B, Vn.8B, Vm.8B |
FusedAddRoundedHalving(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vrhadd_s16 (int16x4_t a, int16x4_t b) A32: VRHADD. S16 Dd, Dn, Dm A64: SRHADD Vd.4H, Vn.4H, Vm.4H |
FusedAddRoundedHalving(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vrhadd_s32 (int32x2_t a, int32x2_t b) A32: VRHADD. S32 Dd, Dn, Dm A64: SRHADD Vd.2S, Vn.2S, Vm.2S |
FusedAddRoundedHalving(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vrhadd_s8 (int8x8_t a, int8x8_t b) A32: VRHADD. S8 Dd, Dn, Dm A64: SRHADD Vd.8B, Vn.8B, Vm.8B |
FusedAddRoundedHalving(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vrhadd_u16 (uint16x4_t a, uint16x4_t b) A32: VRHADD. U16 Dd, Dn, Dm A64: URHADD Vd.4H, Vn.4H, Vm.4H |
FusedAddRoundedHalving(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vrhadd_u32 (uint32x2_t a, uint32x2_t b) A32: VRHADD.U32 Dd, Dn, Dm A64: URHADD Vd.2S, Vn.2S, Vm.2S |
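The halving adds widen internally, so the element-wise average of two byte vectors is computed without the intermediate sum overflowing. FusedAddHalving truncates the low bit; FusedAddRoundedHalving rounds it. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<byte> x = Vector128.Create((byte)200);
    Vector128<byte> y = Vector128.Create((byte)61);

    Vector128<byte> truncated = AdvSimd.FusedAddHalving(x, y);        // (200 + 61) >> 1 = 130
    Vector128<byte> rounded = AdvSimd.FusedAddRoundedHalving(x, y);   // (200 + 61 + 1) >> 1 = 131
}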
FusedMultiplyAdd(Vector128<Single>, Vector128<Single>, Vector128<Single>) |
float32x4_t vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) A32: VFMA.F32 Qd, Qn, Qm A64: FMLA Vd.4S, Vn.4S, Vm.4S |
FusedMultiplyAdd(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32x2_t vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c) A32: VFMA.F32 Dd, Dn, Dm A64: FMLA Vd.2S, Vn.2S, Vm.2S |
FusedMultiplyAddNegatedScalar(Vector64<Double>, Vector64<Double>, Vector64<Double>) |
float64x1_t vfnma_f64 (float64x1_t a, float64x1_t b, float64x1_t c) A32: VFNMA.F64 Dd, Dn, Dm A64: FNMADD Dd, Dn, Dm, Da The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedMultiplyAddNegatedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32_t vfnmas_f32 (float32_t a, float32_t b, float32_t c) A32: VFNMA.F32 Sd, Sn, Sm A64: FNMADD Sd, Sn, Sm, Sa The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedMultiplyAddScalar(Vector64<Double>, Vector64<Double>, Vector64<Double>) |
float64x1_t vfma_f64 (float64x1_t a, float64x1_t b, float64x1_t c) A32: VFMA.F64 Dd, Dn, Dm A64: FMADD Dd, Dn, Dm, Da |
FusedMultiplyAddScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32_t vfmas_f32 (float32_t a, float32_t b, float32_t c) A32: VFMA.F32 Sd, Sn, Sm A64: FMADD Sd, Sn, Sm, Sa The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedMultiplySubtract(Vector128<Single>, Vector128<Single>, Vector128<Single>) |
float32x4_t vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) A32: VFMS.F32 Qd, Qn, Qm A64: FMLS Vd.4S, Vn.4S, Vm.4S |
FusedMultiplySubtract(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32x2_t vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c) A32: VFMS.F32 Dd, Dn, Dm A64: FMLS Vd.2S, Vn.2S, Vm.2S |
FusedMultiplySubtractNegatedScalar(Vector64<Double>, Vector64<Double>, Vector64<Double>) |
float64x1_t vfnms_f64 (float64x1_t a, float64x1_t b, float64x1_t c) A32: VFNMS.F64 Dd, Dn, Dm A64: FNMSUB Dd, Dn, Dm, Da The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedMultiplySubtractNegatedScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32_t vfnmss_f32 (float32_t a, float32_t b, float32_t c) A32: VFNMS.F32 Sd, Sn, Sm A64: FNMSUB Sd, Sn, Sm, Sa The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
FusedMultiplySubtractScalar(Vector64<Double>, Vector64<Double>, Vector64<Double>) |
float64x1_t vfms_f64 (float64x1_t a, float64x1_t b, float64x1_t c) A32: VFMS.F64 Dd, Dn, Dm A64: FMSUB Dd, Dn, Dm, Da |
FusedMultiplySubtractScalar(Vector64<Single>, Vector64<Single>, Vector64<Single>) |
float32_t vfmss_f32 (float32_t a, float32_t b, float32_t c) A32: VFMS.F32 Sd, Sn, Sm A64: FMSUB Sd, Sn, Sm, Sa The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
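The fused multiply overloads take the addend (or minuend) as the first argument and perform the multiply and accumulate with a single rounding step. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<float> acc = Vector128.Create(1.0f);
    Vector128<float> left = Vector128.Create(2.0f);
    Vector128<float> right = Vector128.Create(3.0f);

    Vector128<float> fma = AdvSimd.FusedMultiplyAdd(acc, left, right);      // acc + left * right = 7
    Vector128<float> fms = AdvSimd.FusedMultiplySubtract(acc, left, right); // acc - left * right = -5
}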
FusedSubtractHalving(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vhsubq_u8 (uint8x16_t a, uint8x16_t b) A32: VHSUB. U8 Qd, Qn, Qm A64: UHSUB Vd.16B, Vn.16B, Vm.16B |
FusedSubtractHalving(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vhsubq_s16 (int16x8_t a, int16x8_t b) A32: VHSUB. S16 Qd, Qn, Qm A64: SHSUB Vd.8H, Vn.8H, Vm.8H |
FusedSubtractHalving(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vhsubq_s32 (int32x4_t a, int32x4_t b) A32: VHSUB. S32 Qd, Qn, Qm A64: SHSUB Vd.4S, Vn.4S, Vm.4S |
FusedSubtractHalving(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vhsubq_s8 (int8x16_t a, int8x16_t b) A32: VHSUB. S8 Qd, Qn, Qm A64: SHSUB Vd.16B, Vn.16B, Vm.16B |
FusedSubtractHalving(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vhsubq_u16 (uint16x8_t a, uint16x8_t b) A32: VHSUB. U16 Qd, Qn, Qm A64: UHSUB Vd.8H, Vn.8H, Vm.8H |
FusedSubtractHalving(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vhsubq_u32 (uint32x4_t a, uint32x4_t b) A32: VHSUB. U32 Qd, Qn, Qm A64: UHSUB Vd.4S, Vn.4S, Vm.4S |
FusedSubtractHalving(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vhsub_u8 (uint8x8_t a, uint8x8_t b) A32: VHSUB. U8 Dd, Dn, Dm A64: UHSUB Vd.8B, Vn.8B, Vm.8B |
FusedSubtractHalving(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vhsub_s16 (int16x4_t a, int16x4_t b) A32: VHSUB.S16 Dd, Dn, Dm A64: SHSUB Vd.4H, Vn.4H, Vm.4H |
FusedSubtractHalving(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vhsub_s32 (int32x2_t a, int32x2_t b) A32: VHSUB. S32 Dd, Dn, Dm A64: SHSUB Vd.2S, Vn.2S, Vm.2S |
FusedSubtractHalving(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vhsub_s8 (int8x8_t a, int8x8_t b) A32: VHSUB. S8 Dd, Dn, Dm A64: SHSUB Vd.8B, Vn.8B, Vm.8B |
FusedSubtractHalving(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vhsub_u16 (uint16x4_t a, uint16x4_t b) A32: VHSUB. U16 Dd, Dn, Dm A64: UHSUB Vd.4H, Vn.4H, Vm.4H |
FusedSubtractHalving(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vhsub_u32 (uint32x2_t a, uint32x2_t b) A32: VHSUB. U32 Dd, Dn, Dm A64: UHSUB Vd.2S, Vn.2S, Vm.2S |
GetHashCode() |
Serves as the default hash function. (Inherited from Object) |
GetType() |
Gets the Type of the current instance. (Inherited from Object) |
Insert(Vector128<Byte>, Byte, Byte) |
uint8x16_t vsetq_lane_u8 (uint8_t a, uint8x16_t v, const int lane) A32: VMOV.8 Dd[lane], Rt A64: INS Vd.B[lane], Wn |
Insert(Vector128<Double>, Byte, Double) |
float64x2_t vsetq_lane_f64 (float64_t a, float64x2_t v, const int lane) A32: VMOV. F64 Dd, Dm A64: INS Vd.D[lane], Vn.D[0] |
Insert(Vector128<Int16>, Byte, Int16) |
int16x8_t vsetq_lane_s16 (int16_t a, int16x8_t v, const int lane) A32: VMOV.16 Dd[lane], Rt A64: INS Vd.H[lane], Wn |
Insert(Vector128<Int32>, Byte, Int32) |
int32x4_t vsetq_lane_s32 (int32_t a, int32x4_t v, const int lane) A32: VMOV.32 Dd[lane], Rt A64: INS Vd.S[lane], Wn |
Insert(Vector128<Int64>, Byte, Int64) |
int64x2_t vsetq_lane_s64 (int64_t a, int64x2_t v, const int lane) A32: VMOV.64 Dd, Rt, Rt2 A64: INS Vd.D[lane], Xn |
Insert(Vector128<SByte>, Byte, SByte) |
int8x16_t vsetq_lane_s8 (int8_t a, int8x16_t v, const int lane) A32: VMOV.8 Dd[lane], Rt A64: INS Vd.B[lane], Wn |
Insert(Vector128<Single>, Byte, Single) |
float32x4_t vsetq_lane_f32 (float32_t a, float32x4_t v, const int lane) A32: VMOV.F32 Sd, Sm A64: INS Vd.S[lane], Vn.S[0] |
Insert(Vector128<UInt16>, Byte, UInt16) |
uint16x8_t vsetq_lane_u16 (uint16_t a, uint16x8_t v, const int lane) A32: VMOV.16 Dd[lane], Rt A64: INS Vd.H[lane], Wn |
Insert(Vector128<UInt32>, Byte, UInt32) |
uint32x4_t vsetq_lane_u32 (uint32_t a, uint32x4_t v, const int lane) A32: VMOV.32 Dd[lane], Rt A64: INS Vd.S[lane], Wn |
Insert(Vector128<UInt64>, Byte, UInt64) |
uint64x2_t vsetq_lane_u64 (uint64_t a, uint64x2_t v, const int lane) A32: VMOV.64 Dd, Rt, Rt2 A64: INS Vd.D[lane], Xn |
Insert(Vector64<Byte>, Byte, Byte) |
uint8x8_t vset_lane_u8 (uint8_t a, uint8x8_t v, const int lane) A32: VMOV.8 Dd[lane], Rt A64: INS Vd.B[lane], Wn |
Insert(Vector64<Int16>, Byte, Int16) |
int16x4_t vset_lane_s16 (int16_t a, int16x4_t v, const int lane) A32: VMOV.16 Dd[lane], Rt A64: INS Vd.H[lane], Wn |
Insert(Vector64<Int32>, Byte, Int32) |
int32x2_t vset_lane_s32 (int32_t a, int32x2_t v, const int lane) A32: VMOV.32 Dd[lane], Rt A64: INS Vd.S[lane], Wn |
Insert(Vector64<SByte>, Byte, SByte) |
int8x8_t vset_lane_s8 (int8_t a, int8x8_t v, const int lane) A32: VMOV.8 Dd[lane], Rt A64: INS Vd.B[lane], Wn |
Insert(Vector64<Single>, Byte, Single) |
float32x2_t vset_lane_f32 (float32_t a, float32x2_t v, const int lane) A32: VMOV. F32 Sd, Sm A64: INS Vd.S[lane], Vn.S[0] |
Insert(Vector64<UInt16>, Byte, UInt16) |
uint16x4_t vset_lane_u16 (uint16_t a, uint16x4_t v, const int lane) A32: VMOV.16 Dd[lane], Rt A64: INS Vd.H[lane], Wn |
Insert(Vector64<UInt32>, Byte, UInt32) |
uint32x2_t vset_lane_u32 (uint32_t a, uint32x2_t v, const int lane) A32: VMOV.32 Dd[lane], Rt A64: INS Vd.S[lane], Wn |
InsertScalar(Vector128<Double>, Byte, Vector64<Double>) |
float64x2_t vcopyq_lane_f64 (float64x2_t a, const int lane1, float64x1_t b, const int lane2) A32: VMOV. F64 Dd, Dm A64: INS Vd.D[lane1], Vn.D[0] |
InsertScalar(Vector128<Int64>, Byte, Vector64<Int64>) |
int64x2_t vcopyq_lane_s64 (int64x2_t a, const int lane1, int64x1_t b, const int lane2) A32: VMOV Dd, Dm A64: INS Vd.D[lane1], Vn.D[0] |
InsertScalar(Vector128<UInt64>, Byte, Vector64<UInt64>) |
uint64x2_t vcopyq_lane_u64 (uint64x2_t a, const int lane1, uint64x1_t b, const int lane2) A32: VMOV Dd, Dm A64: INS Vd.D[lane1], Vn.D[0] |
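Insert is the counterpart of Extract: it returns a copy of the vector with one lane replaced by the supplied scalar. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<int> v = Vector128.Create(10, 20, 30, 40);

    // Overwrite lane 1 from a general-purpose register (INS): yields 10, 99, 30, 40.
    Vector128<int> updated = AdvSimd.Insert(v, 1, 99);
}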
LeadingSignCount(Vector128<Int16>) |
int16x8_t vclsq_s16 (int16x8_t a) A32: VCLS.S16 Qd, Qm A64: CLS Vd.8H, Vn.8H |
LeadingSignCount(Vector128<Int32>) |
int32x4_t vclsq_s32 (int32x4_t a) A32: VCLS.S32 Qd, Qm A64: CLS Vd.4S, Vn.4S |
LeadingSignCount(Vector128<SByte>) |
int8x16_t vclsq_s8 (int8x16_t a) A32: VCLS.S8 Qd, Qm A64: CLS Vd.16B, Vn.16B |
LeadingSignCount(Vector64<Int16>) |
int16x4_t vcls_s16 (int16x4_t a) A32: VCLS.S16 Dd, Dm A64: CLS Vd.4H, Vn.4H |
LeadingSignCount(Vector64<Int32>) |
int32x2_t vcls_s32 (int32x2_t a) A32: VCLS.S32 Dd, Dm A64: CLS Vd.2S, Vn.2S |
LeadingSignCount(Vector64<SByte>) |
int8x8_t vcls_s8 (int8x8_t a) A32: VCLS.S8 Dd, Dm A64: CLS Vd.8B, Vn.8B |
LeadingZeroCount(Vector128<Byte>) |
uint8x16_t vclzq_u8 (uint8x16_t a) A32: VCLZ. I8 Qd, Qm A64: CLZ Vd.16B, Vn.16B |
LeadingZeroCount(Vector128<Int16>) |
int16x8_t vclzq_s16 (int16x8_t a) A32: VCLZ. I16 Qd, Qm A64: CLZ Vd.8H, Vn.8H |
LeadingZeroCount(Vector128<Int32>) |
int32x4_t vclzq_s32 (int32x4_t a) A32: VCLZ. I32 Qd, Qm A64: CLZ Vd.4S, Vn.4S |
LeadingZeroCount(Vector128<SByte>) |
int8x16_t vclzq_s8 (int8x16_t a) A32: VCLZ. I8 Qd, Qm A64: CLZ Vd.16B, Vn.16B |
LeadingZeroCount(Vector128<UInt16>) |
uint16x8_t vclzq_u16 (uint16x8_t a) A32: VCLZ. I16 Qd, Qm A64: CLZ Vd.8H, Vn.8H |
LeadingZeroCount(Vector128<UInt32>) |
uint32x4_t vclzq_u32 (uint32x4_t a) A32: VCLZ. I32 Qd, Qm A64: CLZ Vd.4S, Vn.4S |
LeadingZeroCount(Vector64<Byte>) |
uint8x8_t vclz_u8 (uint8x8_t a) A32: VCLZ. I8 Dd, Dm A64: CLZ Vd.8B, Vn.8B |
LeadingZeroCount(Vector64<Int16>) |
int16x4_t vclz_s16 (int16x4_t a) A32: VCLZ. I16 Dd, Dm A64: CLZ Vd.4H, Vn.4H |
LeadingZeroCount(Vector64<Int32>) |
int32x2_t vclz_s32 (int32x2_t a) A32: VCLZ. I32 Dd, Dm A64: CLZ Vd.2S, Vn.2S |
LeadingZeroCount(Vector64<SByte>) |
int8x8_t vclz_s8 (int8x8_t a) A32: VCLZ. I8 Dd, Dm A64: CLZ Vd.8B, Vn.8B |
LeadingZeroCount(Vector64<UInt16>) |
uint16x4_t vclz_u16 (uint16x4_t a) A32: VCLZ. I16 Dd, Dm A64: CLZ Vd.4H, Vn.4H |
LeadingZeroCount(Vector64<UInt32>) |
uint32x2_t vclz_u32 (uint32x2_t a) A32: VCLZ. I32 Dd, Dm A64: CLZ Vd.2S, Vn.2S |
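LeadingSignCount (CLS) counts the redundant sign bits below the sign bit, while LeadingZeroCount (CLZ) counts leading zero bits, both per lane. A quick sketch of the latter:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<uint> v = Vector128.Create(1u, 2u, 0x80000000u, 0u);

    // Per-lane CLZ: 31, 30, 0, 32.
    Vector128<uint> zeros = AdvSimd.LeadingZeroCount(v);
}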
LoadAndInsertScalar(ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>, Byte, Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>, Byte, Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Byte>,Vector64<Byte>>, Byte, Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>, Byte, Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>, Byte, Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int16>,Vector64<Int16>>, Byte, Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>, Byte, Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>, Byte, Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Int32>,Vector64<Int32>>, Byte, Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>, Byte, SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>, Byte, SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<SByte>,Vector64<SByte>>, Byte, SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>,Vector64<Single>>, Byte, Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>>, Byte, Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<Single>,Vector64<Single>>, Byte, Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>, Byte, UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>, Byte, UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt16>,Vector64<UInt16>>, Byte, UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>, Byte, UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>, Byte, UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(ValueTuple<Vector64<UInt32>,Vector64<UInt32>>, Byte, UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndInsertScalar(Vector128<Byte>, Byte, Byte*) |
uint8x16_t vld1q_lane_u8 (uint8_t const * ptr, uint8x16_t src, const int lane) A32: VLD1.8 { Dd[index] }, [Rn] A64: LD1 { Vt.B }[index], [Xn] |
LoadAndInsertScalar(Vector128<Double>, Byte, Double*) |
float64x2_t vld1q_lane_f64 (float64_t const * ptr, float64x2_t src, const int lane) A32: VLDR.64 Dd, [Rn] A64: LD1 { Vt.D }[index], [Xn] |
LoadAndInsertScalar(Vector128<Int16>, Byte, Int16*) |
int16x8_t vld1q_lane_s16 (int16_t const * ptr, int16x8_t src, const int lane) A32: VLD1.16 { Dd[index] }, [Rn] A64: LD1 { Vt.H }[index], [Xn] |
LoadAndInsertScalar(Vector128<Int32>, Byte, Int32*) |
int32x4_t vld1q_lane_s32 (int32_t const * ptr, int32x4_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
LoadAndInsertScalar(Vector128<Int64>, Byte, Int64*) |
int64x2_t vld1q_lane_s64 (int64_t const * ptr, int64x2_t src, const int lane) A32: VLDR.64 Dd, [Rn] A64: LD1 { Vt.D }[index], [Xn] |
LoadAndInsertScalar(Vector128<SByte>, Byte, SByte*) |
int8x16_t vld1q_lane_s8 (int8_t const * ptr, int8x16_t src, const int lane) A32: VLD1.8 { Dd[index] }, [Rn] A64: LD1 { Vt.B }[index], [Xn] |
LoadAndInsertScalar(Vector128<Single>, Byte, Single*) |
float32x4_t vld1q_lane_f32 (float32_t const * ptr, float32x4_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
LoadAndInsertScalar(Vector128<UInt16>, Byte, UInt16*) |
uint16x8_t vld1q_lane_u16 (uint16_t const * ptr, uint16x8_t src, const int lane) A32: VLD1.16 { Dd[index] }, [Rn] A64: LD1 { Vt.H }[index], [Xn] |
LoadAndInsertScalar(Vector128<UInt32>, Byte, UInt32*) |
uint32x4_t vld1q_lane_u32 (uint32_t const * ptr, uint32x4_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
LoadAndInsertScalar(Vector128<UInt64>, Byte, UInt64*) |
uint64x2_t vld1q_lane_u64 (uint64_t const * ptr, uint64x2_t src, const int lane) A32: VLDR.64 Dd, [Rn] A64: LD1 { Vt.D }[index], [Xn] |
LoadAndInsertScalar(Vector64<Byte>, Byte, Byte*) |
uint8x8_t vld1_lane_u8 (uint8_t const * ptr, uint8x8_t src, const int lane) A32: VLD1.8 { Dd[index] }, [Rn] A64: LD1 { Vt.B }[index], [Xn] |
LoadAndInsertScalar(Vector64<Int16>, Byte, Int16*) |
int16x4_t vld1_lane_s16 (int16_t const * ptr, int16x4_t src, const int lane) A32: VLD1.16 { Dd[index] }, [Rn] A64: LD1 { Vt.H }[index], [Xn] |
LoadAndInsertScalar(Vector64<Int32>, Byte, Int32*) |
int32x2_t vld1_lane_s32 (int32_t const * ptr, int32x2_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
LoadAndInsertScalar(Vector64<SByte>, Byte, SByte*) |
int8x8_t vld1_lane_s8 (int8_t const * ptr, int8x8_t src, const int lane) A32: VLD1.8 { Dd[index] }, [Rn] A64: LD1 { Vt.B }[index], [Xn] |
LoadAndInsertScalar(Vector64<Single>, Byte, Single*) |
float32x2_t vld1_lane_f32 (float32_t const * ptr, float32x2_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
LoadAndInsertScalar(Vector64<UInt16>, Byte, UInt16*) |
uint16x4_t vld1_lane_u16 (uint16_t const * ptr, uint16x4_t src, const int lane) A32: VLD1.16 { Dd[index] }, [Rn] A64: LD1 { Vt.H }[index], [Xn] |
LoadAndInsertScalar(Vector64<UInt32>, Byte, UInt32*) |
uint32x2_t vld1_lane_u32 (uint32_t const * ptr, uint32x2_t src, const int lane) A32: VLD1.32 { Dd[index] }, [Rn] A64: LD1 { Vt.S }[index], [Xn] |
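These overloads fuse a scalar load with an Insert: one element is read from memory directly into the selected lane while the remaining lanes are preserved. A minimal unsafe sketch (the helper name PatchLane2 is illustrative, and the caller is assumed to pass a valid pointer):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

static unsafe Vector128<int> PatchLane2(Vector128<int> value, int* address)
{
    if (!AdvSimd.IsSupported)
        throw new System.PlatformNotSupportedException();

    // LD1 { Vt.S }[2], [Xn]: load one 32-bit element into lane 2 of value.
    return AdvSimd.LoadAndInsertScalar(value, 2, address);
}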
LoadAndReplicateToVector128(Byte*) |
uint8x16_t vld1q_dup_u8 (uint8_t const * ptr) A32: VLD1.8 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.16B }, [Xn] |
LoadAndReplicateToVector128(Int16*) |
int16x8_t vld1q_dup_s16 (int16_t const * ptr) A32: VLD1.16 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.8H }, [Xn] |
LoadAndReplicateToVector128(Int32*) |
int32x4_t vld1q_dup_s32 (int32_t const * ptr) A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.4S }, [Xn] |
LoadAndReplicateToVector128(SByte*) |
int8x16_t vld1q_dup_s8 (int8_t const * ptr) A32: VLD1.8 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.16B }, [Xn] |
LoadAndReplicateToVector128(Single*) |
float32x4_t vld1q_dup_f32 (float32_t const * ptr) A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.4S }, [Xn] |
LoadAndReplicateToVector128(UInt16*) |
uint16x8_t vld1q_dup_u16 (uint16_t const * ptr) A32: VLD1.16 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.8H }, [Xn] |
LoadAndReplicateToVector128(UInt32*) |
uint32x4_t vld1q_dup_u32 (uint32_t const * ptr) A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] A64: LD1R { Vt.4S }, [Xn] |
LoadAndReplicateToVector64(Byte*) |
uint8x8_t vld1_dup_u8 (uint8_t const * ptr) A32: VLD1.8 { Dd[] }, [Rn] A64: LD1R { Vt.8B }, [Xn] |
LoadAndReplicateToVector64(Int16*) |
int16x4_t vld1_dup_s16 (int16_t const * ptr) A32: VLD1.16 { Dd[] }, [Rn] A64: LD1R { Vt.4H }, [Xn] |
LoadAndReplicateToVector64(Int32*) |
int32x2_t vld1_dup_s32 (int32_t const * ptr) A32: VLD1.32 { Dd[] }, [Rn] A64: LD1R { Vt.2S }, [Xn] |
LoadAndReplicateToVector64(SByte*) |
int8x8_t vld1_dup_s8 (int8_t const * ptr) A32: VLD1.8 { Dd[] }, [Rn] A64: LD1R { Vt.8B }, [Xn] |
LoadAndReplicateToVector64(Single*) |
float32x2_t vld1_dup_f32 (float32_t const * ptr) A32: VLD1.32 { Dd[] }, [Rn] A64: LD1R { Vt.2S }, [Xn] |
LoadAndReplicateToVector64(UInt16*) |
uint16x4_t vld1_dup_u16 (uint16_t const * ptr) A32: VLD1.16 { Dd[] }, [Rn] A64: LD1R { Vt.4H }, [Xn] |
LoadAndReplicateToVector64(UInt32*) |
uint32x2_t vld1_dup_u32 (uint32_t const * ptr) A32: VLD1.32 { Dd[] }, [Rn] A64: LD1R { Vt.2S }, [Xn] |
LoadAndReplicateToVector64x2(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x2(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x3(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadAndReplicateToVector64x4(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector128(Byte*) |
uint8x16_t vld1q_u8 (uint8_t const * ptr) A32: VLD1.8 Dd, Dd+1, [Rn] A64: LD1 Vt.16B, [Xn] |
LoadVector128(Double*) |
float64x2_t vld1q_f64 (float64_t const * ptr) A32: VLD1.64 Dd, Dd+1, [Rn] A64: LD1 Vt.2D, [Xn] |
LoadVector128(Int16*) |
int16x8_t vld1q_s16 (int16_t const * ptr) A32: VLD1.16 Dd, Dd+1, [Rn] A64: LD1 Vt.8H, [Xn] |
LoadVector128(Int32*) |
int32x4_t vld1q_s32 (int32_t const * ptr) A32: VLD1.32 Dd, Dd+1, [Rn] A64: LD1 Vt.4S, [Xn] |
LoadVector128(Int64*) |
int64x2_t vld1q_s64 (int64_t const * ptr) A32: VLD1.64 Dd, Dd+1, [Rn] A64: LD1 Vt.2D, [Xn] |
LoadVector128(SByte*) |
int8x16_t vld1q_s8 (int8_t const * ptr) A32: VLD1.8 Dd, Dd+1, [Rn] A64: LD1 Vt.16B, [Xn] |
LoadVector128(Single*) |
float32x4_t vld1q_f32 (float32_t const * ptr) A32: VLD1.32 Dd, Dd+1, [Rn] A64: LD1 Vt.4S, [Xn] |
LoadVector128(UInt16*) |
uint16x8_t vld1q_u16 (uint16_t const * ptr) A32: VLD1.16 Dd, Dd+1, [Rn] A64: LD1 Vt.8H, [Xn] |
LoadVector128(UInt32*) |
uint32x4_t vld1q_u32 (uint32_t const * ptr) A32: VLD1.32 Dd, Dd+1, [Rn] A64: LD1 Vt.4S, [Xn] |
LoadVector128(UInt64*) |
uint64x2_t vld1q_u64 (uint64_t const * ptr) A32: VLD1.64 Dd, Dd+1, [Rn] A64: LD1 Vt.2D, [Xn] |
LoadVector64(Byte*) |
uint8x8_t vld1_u8 (uint8_t const * ptr) A32: VLD1.8 Dd, [Rn] A64: LD1 Vt.8B, [Xn] |
LoadVector64(Double*) |
float64x1_t vld1_f64 (float64_t const * ptr) A32: VLD1.64 Dd, [Rn] A64: LD1 Vt.1D, [Xn] |
LoadVector64(Int16*) |
int16x4_t vld1_s16 (int16_t const * ptr) A32: VLD1.16 Dd, [Rn] A64: LD1 Vt.4H, [Xn] |
LoadVector64(Int32*) |
int32x2_t vld1_s32 (int32_t const * ptr) A32: VLD1.32 Dd, [Rn] A64: LD1 Vt.2S, [Xn] |
LoadVector64(Int64*) |
int64x1_t vld1_s64 (int64_t const * ptr) A32: VLD1.64 Dd, [Rn] A64: LD1 Vt.1D, [Xn] |
LoadVector64(SByte*) |
int8x8_t vld1_s8 (int8_t const * ptr) A32: VLD1.8 Dd, [Rn] A64: LD1 Vt.8B, [Xn] |
LoadVector64(Single*) |
float32x2_t vld1_f32 (float32_t const * ptr) A32: VLD1.32 Dd, [Rn] A64: LD1 Vt.2S, [Xn] |
LoadVector64(UInt16*) |
uint16x4_t vld1_u16 (uint16_t const * ptr) A32: VLD1.16 Dd, [Rn] A64: LD1 Vt.4H, [Xn] |
LoadVector64(UInt32*) |
uint32x2_t vld1_u32 (uint32_t const * ptr) A32: VLD1.32 Dd, [Rn] A64: LD1 Vt.2S, [Xn] |
LoadVector64(UInt64*) |
uint64x1_t vld1_u64 (uint64_t const * ptr) A32: VLD1.64 Dd, [Rn] A64: LD1 Vt.1D, [Xn] |
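LoadVector64/128 read 8 or 16 contiguous bytes from the given address into a SIMD register. A small unsafe sketch that loads two int vectors and adds them lane-wise (Add appears earlier in this table; the helper name is illustrative and both arrays are assumed to hold at least four elements):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

static unsafe Vector128<int> AddFirstFour(int[] a, int[] b)
{
    if (!AdvSimd.IsSupported)
        throw new System.PlatformNotSupportedException();

    fixed (int* pa = a)
    fixed (int* pb = b)
    {
        // Two LD1 loads of four 32-bit integers each, then a lane-wise ADD.
        return AdvSimd.Add(AdvSimd.LoadVector128(pa), AdvSimd.LoadVector128(pb));
    }
}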
LoadVector64x2(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x2AndUnzip(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x3AndUnzip(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(Byte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(Int16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(Int32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(SByte*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(Single*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(UInt16*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
LoadVector64x4AndUnzip(UInt32*) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
Max(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vmaxq_u8 (uint8x16_t a, uint8x16_t b) A32: VMAX.U8 Qd, Qn, Qm A64: UMAX Vd.16B, Vn.16B, Vm.16B |
Max(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vmaxq_s16 (int16x8_t a, int16x8_t b) A32: VMAX. S16 Qd, Qn, Qm A64: SMAX Vd.8H, Vn.8H, Vm.8H |
Max(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vmaxq_s32 (int32x4_t a, int32x4_t b) A32: VMAX. S32 Qd, Qn, Qm A64: SMAX Vd.4S, Vn.4S, Vm.4S |
Max(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vmaxq_s8 (int8x16_t a, int8x16_t b) A32: VMAX. S8 Qd, Qn, Qm A64: SMAX Vd.16B, Vn.16B, Vm.16B |
Max(Vector128<Single>, Vector128<Single>) |
float32x4_t vmaxq_f32 (float32x4_t a, float32x4_t b) A32: VMAX. F32 Qd, Qn, Qm A64: FMAX Vd.4S, Vn.4S, Vm.4S |
Max(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vmaxq_u16 (uint16x8_t a, uint16x8_t b) A32: VMAX. U16 Qd, Qn, Qm A64: UMAX Vd.8H, Vn.8H, Vm.8H |
Max(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vmaxq_u32 (uint32x4_t a, uint32x4_t b) A32: VMAX. U32 Qd, Qn, Qm A64: UMAX Vd.4S, Vn.4S, Vm.4S |
Max(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vmax_u8 (uint8x8_t a, uint8x8_t b) A32: VMAX. U8 Dd, Dn, Dm A64: UMAX Vd.8B, Vn.8B, Vm.8B |
Max(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmax_s16 (int16x4_t a, int16x4_t b) A32: VMAX. S16 Dd, Dn, Dm A64: SMAX Vd.4H, Vn.4H, Vm.4H |
Max(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmax_s32 (int32x2_t a, int32x2_t b) A32: VMAX. S32 Dd, Dn, Dm A64: SMAX Vd.2S, Vn.2S, Vm.2S |
Max(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vmax_s8 (int8x8_t a, int8x8_t b) A32: VMAX. S8 Dd, Dn, Dm A64: SMAX Vd.8B, Vn.8B, Vm.8B |
Max(Vector64<Single>, Vector64<Single>) |
float32x2_t vmax_f32 (float32x2_t a, float32x2_t b) A32: VMAX. F32 Dd, Dn, Dm A64: FMAX Vd.2S, Vn.2S, Vm.2S |
Max(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmax_u16 (uint16x4_t a, uint16x4_t b) A32: VMAX. U16 Dd, Dn, Dm A64: UMAX Vd.4H, Vn.4H, Vm.4H |
Max(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmax_u32 (uint32x2_t a, uint32x2_t b) A32: VMAX. U32 Dd, Dn, Dm A64: UMAX Vd.2S, Vn.2S, Vm.2S |
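The Max overloads above compute an element-wise maximum. As a minimal sketch (illustrative values; assumes a device where AdvSimd.IsSupported returns true):

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class MaxExample
{
    static void Main()
    {
        if (!AdvSimd.IsSupported)
            return; // AdvSIMD intrinsics are unavailable on this machine

        Vector64<byte> left  = Vector64.Create((byte)1, 2, 3, 4, 5, 6, 7, 8);
        Vector64<byte> right = Vector64.Create((byte)8, 7, 6, 5, 4, 3, 2, 1);

        // Compiles to VMAX. U8 (A32) / UMAX (A64); each lane holds the larger input.
        Vector64<byte> max = AdvSimd.Max(left, right);
        Console.WriteLine(max); // <8, 7, 6, 5, 5, 6, 7, 8>
    }
}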
MaxNumber(Vector128<Single>, Vector128<Single>) |
float32x4_t vmaxnmq_f32 (float32x4_t a, float32x4_t b) A32: VMAXNM. F32 Qd, Qn, Qm A64: FMAXNM Vd.4S, Vn.4S, Vm.4S |
MaxNumber(Vector64<Single>, Vector64<Single>) |
float32x2_t vmaxnm_f32 (float32x2_t a, float32x2_t b) A32: VMAXNM. F32 Dd, Dn, Dm A64: FMAXNM Vd.2S, Vn.2S, Vm.2S |
MaxNumberScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vmaxnm_f64 (float64x1_t a, float64x1_t b) A32: VMAXNM. F64 Dd, Dn, Dm A64: FMAXNM Dd, Dn, Dm |
MaxNumberScalar(Vector64<Single>, Vector64<Single>) |
float32_t vmaxnms_f32 (float32_t a, float32_t b) A32: VMAXNM. F32 Sd, Sn, Sm A64: FMAXNM Sd, Sn, Sm |
MaxPairwise(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vpmax_u8 (uint8x8_t a, uint8x8_t b) A32: VPMAX. U8 Dd, Dn, Dm A64: UMAXP Vd.8B, Vn.8B, Vm.8B |
MaxPairwise(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vpmax_s16 (int16x4_t a, int16x4_t b) A32: VPMAX. S16 Dd, Dn, Dm A64: SMAXP Vd.4H, Vn.4H, Vm.4H |
MaxPairwise(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vpmax_s32 (int32x2_t a, int32x2_t b) A32: VPMAX. S32 Dd, Dn, Dm A64: SMAXP Vd.2S, Vn.2S, Vm.2S |
MaxPairwise(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vpmax_s8 (int8x8_t a, int8x8_t b) A32: VPMAX. S8 Dd, Dn, Dm A64: SMAXP Vd.8B, Vn.8B, Vm.8B |
MaxPairwise(Vector64<Single>, Vector64<Single>) |
float32x2_t vpmax_f32 (float32x2_t a, float32x2_t b) A32: VPMAX. F32 Dd, Dn, Dm A64: FMAXP Vd.2S, Vn.2S, Vm.2S |
MaxPairwise(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vpmax_u16 (uint16x4_t a, uint16x4_t b) A32: VPMAX. U16 Dd, Dn, Dm A64: UMAXP Vd.4H, Vn.4H, Vm.4H |
MaxPairwise(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vpmax_u32 (uint32x2_t a, uint32x2_t b) A32: VPMAX. U32 Dd, Dn, Dm A64: UMAXP Vd.2S, Vn.2S, Vm.2S |
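The pairwise maximum reduces adjacent element pairs: the low half of the result is built from pairs of the first operand and the high half from pairs of the second. A minimal sketch with illustrative values (same using directives as the Max sketch above):

Vector64<byte> a = Vector64.Create((byte)1, 9, 2, 8, 3, 7, 4, 6);
Vector64<byte> b = Vector64.Create((byte)5, 0, 5, 0, 5, 0, 5, 0);
// VPMAX. U8 / UMAXP: lanes 0-3 reduce adjacent pairs of 'a', lanes 4-7 reduce pairs of 'b'.
Vector64<byte> pairMax = AdvSimd.MaxPairwise(a, b); // <9, 8, 7, 6, 5, 5, 5, 5>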
MemberwiseClone() |
Creates a shallow copy of the current Object. (Inherited from Object) |
Min(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vminq_u8 (uint8x16_t a, uint8x16_t b) A32: VMIN. U8 Qd, Qn, Qm A64: UMIN Vd.16B, Vn.16B, Vm.16B |
Min(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vminq_s16 (int16x8_t a, int16x8_t b) A32: VMIN. S16 Qd, Qn, Qm A64: SMIN Vd.8H, Vn.8H, Vm.8H |
Min(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vminq_s32 (int32x4_t a, int32x4_t b) A32: VMIN. S32 Qd, Qn, Qm A64: SMIN Vd.4S, Vn.4S, Vm.4S |
Min(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vminq_s8 (int8x16_t a, int8x16_t b) A32: VMIN. S8 Qd, Qn, Qm A64: SMIN Vd.16B, Vn.16B, Vm.16B |
Min(Vector128<Single>, Vector128<Single>) |
float32x4_t vminq_f32 (float32x4_t a, float32x4_t b) A32: VMIN. F32 Qd, Qn, Qm A64: FMIN Vd.4S, Vn.4S, Vm.4S |
Min(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vminq_u16 (uint16x8_t a, uint16x8_t b) A32: VMIN. U16 Qd, Qn, Qm A64: UMIN Vd.8H, Vn.8H, Vm.8H |
Min(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vminq_u32 (uint32x4_t a, uint32x4_t b) A32: VMIN. U32 Qd, Qn, Qm A64: UMIN Vd.4S, Vn.4S, Vm.4S |
Min(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vmin_u8 (uint8x8_t a, uint8x8_t b) A32: VMIN. U8 Dd, Dn, Dm A64: UMIN Vd.8B, Vn.8B, Vm.8B |
Min(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmin_s16 (int16x4_t a, int16x4_t b) A32: VMIN. S16 Dd, Dn, Dm A64: SMIN Vd.4H, Vn.4H, Vm.4H |
Min(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmin_s32 (int32x2_t a, int32x2_t b) A32: VMIN. S32 Dd, Dn, Dm A64: SMIN Vd.2S, Vn.2S, Vm.2S |
Min(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vmin_s8 (int8x8_t a, int8x8_t b) A32: VMIN. S8 Dd, Dn, Dm A64: SMIN Vd.8B, Vn.8B, Vm.8B |
Min(Vector64<Single>, Vector64<Single>) |
float32x2_t vmin_f32 (float32x2_t a, float32x2_t b) A32: VMIN. F32 Dd, Dn, Dm A64: FMIN Vd.2S, Vn.2S, Vm.2S |
Min(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmin_u16 (uint16x4_t a, uint16x4_t b) A32: VMIN. U16 Dd, Dn, Dm A64: UMIN Vd.4H, Vn.4H, Vm.4H |
Min(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmin_u32 (uint32x2_t a, uint32x2_t b) A32: VMIN. U32 Dd, Dn, Dm A64: UMIN Vd.2S, Vn.2S, Vm.2S |
MinNumber(Vector128<Single>, Vector128<Single>) |
float32x4_t vminnmq_f32 (float32x4_t a, float32x4_t b) A32: VMINNM. F32 Qd, Qn, Qm A64: FMINNM Vd.4S, Vn.4S, Vm.4S |
MinNumber(Vector64<Single>, Vector64<Single>) |
float32x2_t vminnm_f32 (float32x2_t a, float32x2_t b) A32: VMINNM. F32 Dd, Dn, Dm A64: FMINNM Vd.2S, Vn.2S, Vm.2S |
MinNumberScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vminnm_f64 (float64x1_t a, float64x1_t b) A32: VMINNM. F64 Dd, Dn, Dm A64: FMINNM Dd, Dn, Dm |
MinNumberScalar(Vector64<Single>, Vector64<Single>) |
float32_t vminnms_f32 (float32_t a, float32_t b) A32: VMINNM. F32 Sd, Sn, Sm A64: FMINNM Sd, Sn, Sm |
MinPairwise(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vpmin_u8 (uint8x8_t a, uint8x8_t b) A32: VPMIN. U8 Dd, Dn, Dm A64: UMINP Vd.8B, Vn.8B, Vm.8B |
MinPairwise(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vpmin_s16 (int16x4_t a, int16x4_t b) A32: VPMIN. S16 Dd, Dn, Dm A64: SMINP Vd.4H, Vn.4H, Vm.4H |
MinPairwise(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vpmin_s32 (int32x2_t a, int32x2_t b) A32: VPMIN. S32 Dd, Dn, Dm A64: SMINP Vd.2S, Vn.2S, Vm.2S |
MinPairwise(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vpmin_s8 (int8x8_t a, int8x8_t b) A32: VPMIN. S8 Dd, Dn, Dm A64: SMINP Vd.8B, Vn.8B, Vm.8B |
MinPairwise(Vector64<Single>, Vector64<Single>) |
float32x2_t vpmin_f32 (float32x2_t a, float32x2_t b) A32: VPMIN. F32 Dd, Dn, Dm A64: FMINP Vd.2S, Vn.2S, Vm.2S |
MinPairwise(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vpmin_u16 (uint16x4_t a, uint16x4_t b) A32: VPMIN. U16 Dd, Dn, Dm A64: UMINP Vd.4H, Vn.4H, Vm.4H |
MinPairwise(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vpmin_u32 (uint32x2_t a, uint32x2_t b) A32: VPMIN. U32 Dd, Dn, Dm A64: UMINP Vd.2S, Vn.2S, Vm.2S |
Multiply(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vmulq_u8 (uint8x16_t a, uint8x16_t b) A32: VMUL. I8 Qd, Qn, Qm A64: MUL Vd.16B, Vn.16B, Vm.16B |
Multiply(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vmulq_s16 (int16x8_t a, int16x8_t b) A32: VMUL. I16 Qd, Qn, Qm A64: MUL Vd.8H, Vn.8H, Vm.8H |
Multiply(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vmulq_s32 (int32x4_t a, int32x4_t b) A32: VMUL. I32 Qd, Qn, Qm A64: MUL Vd.4S, Vn.4S, Vm.4S |
Multiply(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vmulq_s8 (int8x16_t a, int8x16_t b) A32: VMUL. I8 Qd, Qn, Qm A64: MUL Vd.16B, Vn.16B, Vm.16B |
Multiply(Vector128<Single>, Vector128<Single>) |
float32x4_t vmulq_f32 (float32x4_t a, float32x4_t b) A32: VMUL. F32 Qd, Qn, Qm A64: FMUL Vd.4S, Vn.4S, Vm.4S |
Multiply(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vmulq_u16 (uint16x8_t a, uint16x8_t b) A32: VMUL. I16 Qd, Qn, Qm A64: MUL Vd.8H, Vn.8H, Vm.8H |
Multiply(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vmulq_u32 (uint32x4_t a, uint32x4_t b) A32: VMUL. I32 Qd, Qn, Qm A64: MUL Vd.4S, Vn.4S, Vm.4S |
Multiply(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vmul_u8 (uint8x8_t a, uint8x8_t b) A32: VMUL. I8 Dd, Dn, Dm A64: MUL Vd.8B, Vn.8B, Vm.8B |
Multiply(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmul_s16 (int16x4_t a, int16x4_t b) A32: VMUL. I16 Dd, Dn, Dm A64: MUL Vd.4H, Vn.4H, Vm.4H |
Multiply(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmul_s32 (int32x2_t a, int32x2_t b) A32: VMUL. I32 Dd, Dn, Dm A64: MUL Vd.2S, Vn.2S, Vm.2S |
Multiply(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vmul_s8 (int8x8_t a, int8x8_t b) A32: VMUL. I8 Dd, Dn, Dm A64: MUL Vd.8B, Vn.8B, Vm.8B |
Multiply(Vector64<Single>, Vector64<Single>) |
float32x2_t vmul_f32 (float32x2_t a, float32x2_t b) A32: VMUL. F32 Dd, Dn, Dm A64: FMUL Vd.2S, Vn.2S, Vm.2S |
Multiply(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmul_u16 (uint16x4_t a, uint16x4_t b) A32: VMUL. I16 Dd, Dn, Dm A64: MUL Vd.4H, Vn.4H, Vm.4H |
Multiply(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmul_u32 (uint32x2_t a, uint32x2_t b) A32: VMUL. I32 Dd, Dn, Dm A64: MUL Vd.2S, Vn.2S, Vm.2S |
MultiplyAdd(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) A32: VMLA. I8 Qd, Qn, Qm A64: MLA Vd.16B, Vn.16B, Vm.16B |
MultiplyAdd(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>) |
int16x8_t vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) A32: VMLA. I16 Qd, Qn, Qm A64: MLA Vd.8H, Vn.8H, Vm.8H |
MultiplyAdd(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>) |
int32x4_t vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) A32: VMLA. I32 Qd, Qn, Qm A64: MLA Vd.4S, Vn.4S, Vm.4S |
MultiplyAdd(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>) |
int8x16_t vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) A32: VMLA. I8 Qd, Qn, Qm A64: MLA Vd.16B, Vn.16B, Vm.16B |
MultiplyAdd(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) A32: VMLA. I16 Qd, Qn, Qm A64: MLA Vd.8H, Vn.8H, Vm.8H |
MultiplyAdd(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) A32: VMLA. I32 Qd, Qn, Qm A64: MLA Vd.4S, Vn.4S, Vm.4S |
MultiplyAdd(Vector64<Byte>, Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) A32: VMLA. I8 Dd, Dn, Dm A64: MLA Vd.8B, Vn.8B, Vm.8B |
MultiplyAdd(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c) A32: VMLA. I16 Dd, Dn, Dm A64: MLA Vd.4H, Vn.4H, Vm.4H |
MultiplyAdd(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c) A32: VMLA. I32 Dd, Dn, Dm A64: MLA Vd.2S, Vn.2S, Vm.2S |
MultiplyAdd(Vector64<SByte>, Vector64<SByte>, Vector64<SByte>) |
int8x8_t vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c) A32: VMLA. I8 Dd, Dn, Dm A64: MLA Vd.8B, Vn.8B, Vm.8B |
MultiplyAdd(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) A32: VMLA. I16 Dd, Dn, Dm A64: MLA Vd.4H, Vn.4H, Vm.4H |
MultiplyAdd(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) A32: VMLA. I32 Dd, Dn, Dm A64: MLA Vd.2S, Vn.2S, Vm.2S |
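In the MultiplyAdd overloads the first argument is the addend: MultiplyAdd(addend, left, right) computes addend + (left * right) per lane (VMLA/MLA), so an accumulator can be carried across loop iterations. A minimal sketch with illustrative values (same using directives as the Max sketch above):

Vector64<int> acc   = Vector64.Create(10, 20);
Vector64<int> left  = Vector64.Create(2, 3);
Vector64<int> right = Vector64.Create(5, 7);
// MLA Vd.2S: acc = acc + left * right, element by element.
acc = AdvSimd.MultiplyAdd(acc, left, right); // <20, 41>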
MultiplyAddByScalar(Vector128<Int16>, Vector128<Int16>, Vector64<Int16>) |
int16x8_t vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) A32: VMLA. I16 Qd, Qn, Dm[0] A64: MLA Vd.8H, Vn.8H, Vm.H[0] |
MultiplyAddByScalar(Vector128<Int32>, Vector128<Int32>, Vector64<Int32>) |
int32x4_t vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) A32: VMLA. I32 Qd, Qn, Dm[0] A64: MLA Vd.4S, Vn.4S, Vm.S[0] |
MultiplyAddByScalar(Vector128<UInt16>, Vector128<UInt16>, Vector64<UInt16>) |
uint16x8_t vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) A32: VMLA. I16 Qd, Qn, Dm[0] A64: MLA Vd.8H, Vn.8H, Vm.H[0] |
MultiplyAddByScalar(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>) |
uint32x4_t vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) A32: VMLA. I32 Qd, Qn, Dm[0] A64: MLA Vd.4S, Vn.4S, Vm.S[0] |
MultiplyAddByScalar(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c) A32: VMLA. I16 Dd, Dn, Dm[0] A64: MLA Vd.4H, Vn.4H, Vm.H[0] |
MultiplyAddByScalar(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c) A32: VMLA. I32 Dd, Dn, Dm[0] A64: MLA Vd.2S, Vn.2S, Vm.S[0] |
MultiplyAddByScalar(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) A32: VMLA. I16 Dd, Dn, Dm[0] A64: MLA Vd.4H, Vn.4H, Vm.H[0] |
MultiplyAddByScalar(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) A32: VMLA. I32 Dd, Dn, Dm[0] A64: MLA Vd.2S, Vn.2S, Vm.S[0] |
MultiplyAddBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vmlaq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane) A32: VMLA. I16 Qd, Qn, Dm[lane] A64: MLA Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Vector64<Int16>, Byte) |
int16x8_t vmlaq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane) A32: VMLA. I16 Qd, Qn, Dm[lane] A64: MLA Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vmlaq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane) A32: VMLA. I32 Qd, Qn, Dm[lane] A64: MLA Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Vector64<Int32>, Byte) |
int32x4_t vmlaq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane) A32: VMLA. I32 Qd, Qn, Dm[lane] A64: MLA Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vmlaq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane) A32: VMLA. I16 Qd, Qn, Dm[lane] A64: MLA Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector128<UInt16>, Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint16x8_t vmlaq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane) A32: VMLA. I16 Qd, Qn, Dm[lane] A64: MLA Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vmlaq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane) A32: VMLA. I32 Qd, Qn, Dm[lane] A64: MLA Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint32x4_t vmlaq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane) A32: VMLA. I32 Qd, Qn, Dm[lane] A64: MLA Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Vector128<Int16>, Byte) |
int16x4_t vmla_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VMLA. I16 Dd, Dn, Dm[lane] A64: MLA Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vmla_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VMLA. I16 Dd, Dn, Dm[lane] A64: MLA Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Vector128<Int32>, Byte) |
int32x2_t vmla_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VMLA. I32 Dd, Dn, Dm[lane] A64: MLA Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vmla_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VMLA. I32 Dd, Dn, Dm[lane] A64: MLA Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector64<UInt16>, Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint16x4_t vmla_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane) A32: VMLA. I16 Dd, Dn, Dm[lane] A64: MLA Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vmla_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane) A32: VMLA. I16 Dd, Dn, Dm[lane] A64: MLA Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyAddBySelectedScalar(Vector64<UInt32>, Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint32x2_t vmla_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane) A32: VMLA. I32 Dd, Dn, Dm[lane] A64: MLA Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyAddBySelectedScalar(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vmla_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane) A32: VMLA. I32 Dd, Dn, Dm[lane] A64: MLA Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyByScalar(Vector128<Int16>, Vector64<Int16>) |
int16x8_t vmulq_n_s16 (int16x8_t a, int16_t b) A32: VMUL. I16 Qd, Qn, Dm[0] A64: MUL Vd.8H, Vn.8H, Vm.H[0] |
MultiplyByScalar(Vector128<Int32>, Vector64<Int32>) |
int32x4_t vmulq_n_s32 (int32x4_t a, int32_t b) A32: VMUL. I32 Qd, Qn, Dm[0] A64: MUL Vd.4S, Vn.4S, Vm.S[0] |
MultiplyByScalar(Vector128<Single>, Vector64<Single>) |
float32x4_t vmulq_n_f32 (float32x4_t a, float32_t b) A32: VMUL. F32 Qd, Qn, Dm[0] A64: FMUL Vd.4S, Vn.4S, Vm.S[0] |
MultiplyByScalar(Vector128<UInt16>, Vector64<UInt16>) |
uint16x8_t vmulq_n_u16 (uint16x8_t a, uint16_t b) A32: VMUL. I16 Qd, Qn, Dm[0] A64: MUL Vd.8H, Vn.8H, Vm.H[0] |
MultiplyByScalar(Vector128<UInt32>, Vector64<UInt32>) |
uint32x4_t vmulq_n_u32 (uint32x4_t a, uint32_t b) A32: VMUL. I32 Qd, Qn, Dm[0] A64: MUL Vd.4S, Vn.4S, Vm.S[0] |
MultiplyByScalar(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmul_n_s16 (int16x4_t a, int16_t b) A32: VMUL. I16 Dd, Dn, Dm[0] A64: MUL Vd.4H, Vn.4H, Vm.H[0] |
MultiplyByScalar(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmul_n_s32 (int32x2_t a, int32_t b) A32: VMUL. I32 Dd, Dn, Dm[0] A64: MUL Vd.2S, Vn.2S, Vm.S[0] |
MultiplyByScalar(Vector64<Single>, Vector64<Single>) |
float32x2_t vmul_n_f32 (float32x2_t a, float32_t b) A32: VMUL. F32 Dd, Dn, Dm[0] A64: FMUL Vd.2S, Vn.2S, Vm.S[0] |
MultiplyByScalar(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmul_n_u16 (uint16x4_t a, uint16_t b) A32: VMUL. I16 Dd, Dn, Dm[0] A64: MUL Vd.4H, Vn.4H, Vm.H[0] |
MultiplyByScalar(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmul_n_u32 (uint32x2_t a, uint32_t b) A32: VMUL. I32 Dd, Dn, Dm[0] A64: MUL Vd.2S, Vn.2S, Vm.S[0] |
MultiplyBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vmulq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VMUL. I16 Qd, Qn, Dm[lane] A64: MUL Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector128<Int16>, Vector64<Int16>, Byte) |
int16x8_t vmulq_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VMUL. I16 Qd, Qn, Dm[lane] A64: MUL Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vmulq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VMUL. I32 Qd, Qn, Dm[lane] A64: MUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector128<Int32>, Vector64<Int32>, Byte) |
int32x4_t vmulq_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VMUL. I32 Qd, Qn, Dm[lane] A64: MUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector128<Single>, Vector128<Single>, Byte) |
float32x4_t vmulq_laneq_f32 (float32x4_t a, float32x4_t v, const int lane) A32: VMUL. F32 Qd, Qn, Dm[lane] A64: FMUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector128<Single>, Vector64<Single>, Byte) |
float32x4_t vmulq_lane_f32 (float32x4_t a, float32x2_t v, const int lane) A32: VMUL. F32 Qd, Qn, Dm[lane] A64: FMUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vmulq_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane) A32: VMUL. I16 Qd, Qn, Dm[lane] A64: MUL Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint16x8_t vmulq_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane) A32: VMUL. I16 Qd, Qn, Dm[lane] A64: MUL Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vmulq_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane) A32: VMUL. I32 Qd, Qn, Dm[lane] A64: MUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint32x4_t vmulq_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane) A32: VMUL. I32 Qd, Qn, Dm[lane] A64: MUL Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<Int16>, Vector128<Int16>, Byte) |
int16x4_t vmul_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VMUL. I16 Dd, Dn, Dm[lane] A64: MUL Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vmul_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VMUL. I16 Dd, Dn, Dm[lane] A64: MUL Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector64<Int32>, Vector128<Int32>, Byte) |
int32x2_t vmul_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VMUL. I32 Dd, Dn, Dm[lane] A64: MUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vmul_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VMUL. I32 Dd, Dn, Dm[lane] A64: MUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<Single>, Vector128<Single>, Byte) |
float32x2_t vmul_laneq_f32 (float32x2_t a, float32x4_t v, const int lane) A32: VMUL. F32 Dd, Dn, Dm[lane] A64: FMUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<Single>, Vector64<Single>, Byte) |
float32x2_t vmul_lane_f32 (float32x2_t a, float32x2_t v, const int lane) A32: VMUL. F32 Dd, Dn, Dm[lane] A64: FMUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint16x4_t vmul_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane) A32: VMUL. I16 Dd, Dn, Dm[lane] A64: MUL Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vmul_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane) A32: VMUL. I16 Dd, Dn, Dm[lane] A64: MUL Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalar(Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint32x2_t vmul_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane) A32: VMUL. I32 Dd, Dn, Dm[lane] A64: MUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalar(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vmul_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane) A32: VMUL. I32 Dd, Dn, Dm[lane] A64: MUL Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VMULL. S16 Qd, Dn, Dm[lane] A64: SMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VMULL. S16 Qd, Dn, Dm[lane] A64: SMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VMULL. S32 Qd, Dn, Dm[lane] A64: SMULL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VMULL. S32 Qd, Dn, Dm[lane] A64: SMULL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmull_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane) A32: VMULL. U16 Qd, Dn, Dm[lane] A64: UMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmull_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane) A32: VMULL. U16 Qd, Dn, Dm[lane] A64: UMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmull_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane) A32: VMULL. U32 Qd, Dn, Dm[lane] A64: UMULL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLower(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmull_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane) A32: VMULL. U32 Qd, Dn, Dm[lane] A64: UMULL Vd.2D, Vn.2S, Vm.S[lane] |
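These widening "by selected scalar" forms multiply every narrow element of the first vector by one lane of the second and return the products at twice the element width, so they cannot overflow. A minimal sketch with illustrative values (same using directives as the Max sketch above; the lane index must be within range for the selector vector):

Vector64<short> values = Vector64.Create((short)100, 200, 300, 400);
Vector64<short> scales = Vector64.Create((short)3, 50, 7, 9);
// SMULL Vd.4S, Vn.4H, Vm.H[1]: multiply each 16-bit element by lane 1 of 'scales',
// producing 32-bit results.
Vector128<int> wide = AdvSimd.MultiplyBySelectedScalarWideningLower(values, scales, 1);
// wide = <5000, 10000, 15000, 20000>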
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<Int32>, Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VMLAL. S16 Qd, Dn, Dm[lane] A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VMLAL. S16 Qd, Dn, Dm[lane] A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VMLAL. S32 Qd, Dn, Dm[lane] A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VMLAL. S32 Qd, Dn, Dm[lane] A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<UInt32>, Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmlal_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane) A32: VMLAL. U16 Qd, Dn, Dm[lane] A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<UInt32>, Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmlal_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane) A32: VMLAL. U16 Qd, Dn, Dm[lane] A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<UInt64>, Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmlal_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane) A32: VMLAL. U32 Qd, Dn, Dm[lane] A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<UInt64>, Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmlal_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane) A32: VMLAL. U32 Qd, Dn, Dm[lane] A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<Int32>, Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VMLSL. S16 Qd, Dn, Dm[lane] A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VMLSL. S16 Qd, Dn, Dm[lane] A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VMLSL. S32 Qd, Dn, Dm[lane] A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VMLSL. S32 Qd, Dn, Dm[lane] A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<UInt32>, Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmlsl_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane) A32: VMLSL. U16 Qd, Dn, Dm[lane] A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<UInt32>, Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmlsl_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane) A32: VMLSL. U16 Qd, Dn, Dm[lane] A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<UInt64>, Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmlsl_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane) A32: VMLSL. U32 Qd, Dn, Dm[lane] A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<UInt64>, Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmlsl_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane) A32: VMLSL. U32 Qd, Dn, Dm[lane] A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VMULL. S16 Qd, Dn+1, Dm[lane] A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VMULL. S16 Qd, Dn+1, Dm[lane] A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VMULL. S32 Qd, Dn+1, Dm[lane] A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VMULL. S32 Qd, Dn+1, Dm[lane] A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmull_high_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane) A32: VMULL. U16 Qd, Dn+1, Dm[lane] A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmull_high_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane) A32: VMULL. U16 Qd, Dn+1, Dm[lane] A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmull_high_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane) A32: VMULL. U32 Qd, Dn+1, Dm[lane] A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpper(Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmull_high_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane) A32: VMULL. U32 Qd, Dn+1, Dm[lane] A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VMLAL. S16 Qd, Dn+1, Dm[lane] A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VMLAL. S16 Qd, Dn+1, Dm[lane] A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VMLAL. S32 Qd, Dn+1, Dm[lane] A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VMLAL. S32 Qd, Dn+1, Dm[lane] A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<UInt32>, Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmlal_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane) A32: VMLAL. U16 Qd, Dn+1, Dm[lane] A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<UInt32>, Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmlal_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane) A32: VMLAL. U16 Qd, Dn+1, Dm[lane] A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<UInt64>, Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmlal_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane) A32: VMLAL. U32 Qd, Dn+1, Dm[lane] A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<UInt64>, Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmlal_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane) A32: VMLAL. U32 Qd, Dn+1, Dm[lane] A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VMLSL. S16 Qd, Dn+1, Dm[lane] A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VMLSL. S16 Qd, Dn+1, Dm[lane] A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VMLSL. S32 Qd, Dn+1, Dm[lane] A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VMLSL. S32 Qd, Dn+1, Dm[lane] A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<UInt32>, Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint32x4_t vmlsl_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane) A32: VMLSL. U16 Qd, Dn+1, Dm[lane] A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<UInt32>, Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint32x4_t vmlsl_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane) A32: VMLSL. U16 Qd, Dn+1, Dm[lane] A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<UInt64>, Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint64x2_t vmlsl_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane) A32: VMLSL. U32 Qd, Dn+1, Dm[lane] A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<UInt64>, Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint64x2_t vmlsl_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane) A32: VMLSL. U32 Qd, Dn+1, Dm[lane] A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingByScalarSaturateHigh(Vector128<Int16>, Vector64<Int16>) |
int16x8_t vqdmulhq_n_s16 (int16x8_t a, int16_t b) A32: VQDMULH. S16 Qd, Qn, Dm[0] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[0] |
MultiplyDoublingByScalarSaturateHigh(Vector128<Int32>, Vector64<Int32>) |
int32x4_t vqdmulhq_n_s32 (int32x4_t a, int32_t b) A32: VQDMULH. S32 Qd, Qn, Dm[0] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[0] |
MultiplyDoublingByScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqdmulh_n_s16 (int16x4_t a, int16_t b) A32: VQDMULH. S16 Dd, Dn, Dm[0] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[0] |
MultiplyDoublingByScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqdmulh_n_s32 (int32x2_t a, int32_t b) A32: VQDMULH. S32 Dd, Dn, Dm[0] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[0] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vqdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQDMULH. S16 Qd, Qn, Dm[lane] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<Int16>, Vector64<Int16>, Byte) |
int16x8_t vqdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQDMULH. S16 Qd, Qn, Dm[lane] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vqdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQDMULH. S32 Qd, Qn, Dm[lane] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<Int32>, Vector64<Int32>, Byte) |
int32x4_t vqdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQDMULH. S32 Qd, Qn, Dm[lane] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<Int16>, Vector128<Int16>, Byte) |
int16x4_t vqdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQDMULH. S16 Dd, Dn, Dm[lane] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vqdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQDMULH. S16 Dd, Dn, Dm[lane] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<Int32>, Vector128<Int32>, Byte) |
int32x2_t vqdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQDMULH. S32 Dd, Dn, Dm[lane] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vqdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQDMULH. S32 Dd, Dn, Dm[lane] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyDoublingSaturateHigh(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqdmulhq_s16 (int16x8_t a, int16x8_t b) A32: VQDMULH. S16 Qd, Qn, Qm A64: SQDMULH Vd.8H, Vn.8H, Vm.8H |
MultiplyDoublingSaturateHigh(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqdmulhq_s32 (int32x4_t a, int32x4_t b) A32: VQDMULH. S32 Qd, Qn, Qm A64: SQDMULH Vd.4S, Vn.4S, Vm.4S |
MultiplyDoublingSaturateHigh(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqdmulh_s16 (int16x4_t a, int16x4_t b) A32: VQDMULH. S16 Dd, Dn, Dm A64: SQDMULH Vd.4H, Vn.4H, Vm.4H |
MultiplyDoublingSaturateHigh(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqdmulh_s32 (int32x2_t a, int32x2_t b) A32: VQDMULH. S32 Dd, Dn, Dm A64: SQDMULH Vd.2S, Vn.2S, Vm.2S |
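The doubling-saturating-high forms implement Q15/Q31 fixed-point multiplication: for Int16 each lane computes saturate((2 * a * b) >> 16), i.e. the high half of the doubled product. A minimal sketch in Q15 with illustrative values (same using directives as the Max sketch above):

Vector64<short> a = Vector64.Create((short)16384, 16384, -32768, 0); //  0.5,  0.5, -1.0, 0.0 in Q15
Vector64<short> b = Vector64.Create((short)16384, -16384, -32768, 0); //  0.5, -0.5, -1.0, 0.0
// SQDMULH Vd.4H: high halves of the doubled products, with saturation.
Vector64<short> q15 = AdvSimd.MultiplyDoublingSaturateHigh(a, b);
// q15 = <8192, -8192, 32767, 0>  (0.25, -0.25, saturated ~1.0, 0.0)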
MultiplyDoublingWideningLowerAndAddSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VQDMLAL. S16 Qd, Dn, Dm A64: SQDMLAL Vd.4S, Vn.4H, Vm.4H |
MultiplyDoublingWideningLowerAndAddSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VQDMLAL. S32 Qd, Dn, Dm A64: SQDMLAL Vd.2D, Vn.2S, Vm.2S |
MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VQDMLSL. S16 Qd, Dn, Dm A64: SQDMLSL Vd.4S, Vn.4H, Vm.4H |
MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VQDMLSL. S32 Qd, Dn, Dm A64: SQDMLSL Vd.2D, Vn.2S, Vm.2S |
MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c) A32: VQDMLAL. S16 Qd, Dn, Dm[0] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[0] |
MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c) A32: VQDMLAL. S32 Qd, Dn, Dm[0] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[0] |
MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c) A32: VQDMLSL. S16 Qd, Dn, Dm[0] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[0] |
MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c) A32: VQDMLSL. S32 Qd, Dn, Dm[0] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[0] |
MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<Int32>, Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VQDMLAL. S16 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VQDMLAL. S16 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VQDMLAL. S32 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VQDMLAL. S32 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<Int32>, Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VQDMLSL. S16 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VQDMLSL. S16 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VQDMLSL. S32 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VQDMLSL. S32 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningSaturateLower(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmull_s16 (int16x4_t a, int16x4_t b) A32: VQDMULL. S16 Qd, Dn, Dm A64: SQDMULL Vd.4S, Vn.4H, Vm.4H |
MultiplyDoublingWideningSaturateLower(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmull_s32 (int32x2_t a, int32x2_t b) A32: VQDMULL. S32 Qd, Dn, Dm A64: SQDMULL Vd.2D, Vn.2S, Vm.2S |
MultiplyDoublingWideningSaturateLowerByScalar(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vqdmull_n_s16 (int16x4_t a, int16_t b) A32: VQDMULL. S16 Qd, Dn, Dm[0] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[0] |
MultiplyDoublingWideningSaturateLowerByScalar(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vqdmull_n_s32 (int32x2_t a, int32_t b) A32: VQDMULL. S32 Qd, Dn, Dm[0] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[0] |
MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQDMULL. S16 Qd, Dn, Dm[lane] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQDMULL. S16 Qd, Dn, Dm[lane] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane] |
MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQDMULL. S32 Qd, Dn, Dm[lane] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQDMULL. S32 Qd, Dn, Dm[lane] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane] |
MultiplyDoublingWideningSaturateUpper(Vector128<Int16>, Vector128<Int16>) |
int32x4_t vqdmull_high_s16 (int16x8_t a, int16x8_t b) A32: VQDMULL. S16 Qd, Dn+1, Dm+1 A64: SQDMULL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyDoublingWideningSaturateUpper(Vector128<Int32>, Vector128<Int32>) |
int64x2_t vqdmull_high_s32 (int32x4_t a, int32x4_t b) A32: VQDMULL. S32 Qd, Dn+1, Dm+1 A64: SQDMULL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyDoublingWideningSaturateUpperByScalar(Vector128<Int16>, Vector64<Int16>) |
int32x4_t vqdmull_high_n_s16 (int16x8_t a, int16_t b) A32: VQDMULL. S16 Qd, Dn+1, Dm[0] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[0] |
MultiplyDoublingWideningSaturateUpperByScalar(Vector128<Int32>, Vector64<Int32>) |
int64x2_t vqdmull_high_n_s32 (int32x4_t a, int32_t b) A32: VQDMULL. S32 Qd, Dn+1, Dm[0] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[0] |
MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQDMULL. S16 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQDMULL. S16 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQDMULL. S32 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQDMULL. S32 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingWideningUpperAndAddSaturate(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>) |
int32x4_t vqdmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VQDMLAL. S16 Qd, Dn+1, Dm+1 A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyDoublingWideningUpperAndAddSaturate(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>) |
int64x2_t vqdmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VQDMLAL. S32 Qd, Dn+1, Dm+1 A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>) |
int32x4_t vqdmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VQDMLSL. S16 Qd, Dn+1, Dm+1 A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>) |
int64x2_t vqdmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VQDMLSL. S32 Qd, Dn+1, Dm+1 A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>) |
int32x4_t vqdmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) A32: VQDMLAL. S16 Qd, Dn+1, Dm[0] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[0] |
MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>) |
int64x2_t vqdmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) A32: VQDMLAL. S32 Qd, Dn+1, Dm[0] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[0] |
MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>) |
int32x4_t vqdmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) A32: VQDMLSL. S16 Qd, Dn+1, Dm[0] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[0] |
MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>) |
int64x2_t vqdmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) A32: VQDMLSL. S32 Qd, Dn+1, Dm[0] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[0] |
MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VQDMLAL. S16 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VQDMLAL. S16 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VQDMLAL. S32 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VQDMLAL. S32 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>, Byte) |
int32x4_t vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VQDMLSL. S16 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<Int32>, Vector128<Int16>, Vector64<Int16>, Byte) |
int32x4_t vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VQDMLSL. S16 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>, Byte) |
int64x2_t vqdmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VQDMLSL. S32 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<Int64>, Vector128<Int32>, Vector64<Int32>, Byte) |
int64x2_t vqdmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VQDMLSL. S32 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane] |
MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<Int16>, Vector64<Int16>) |
int16x8_t vqrdmulhq_n_s16 (int16x8_t a, int16_t b) A32: VQRDMULH. S16 Qd, Qn, Dm[0] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[0] |
MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<Int32>, Vector64<Int32>) |
int32x4_t vqrdmulhq_n_s32 (int32x4_t a, int32_t b) A32: VQRDMULH. S32 Qd, Qn, Dm[0] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[0] |
MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqrdmulh_n_s16 (int16x4_t a, int16_t b) A32: VQRDMULH. S16 Dd, Dn, Dm[0] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[0] |
MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqrdmulh_n_s32 (int32x2_t a, int32_t b) A32: VQRDMULH. S32 Dd, Dn, Dm[0] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[0] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vqrdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQRDMULH. S16 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<Int16>, Vector64<Int16>, Byte) |
int16x8_t vqrdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQRDMULH. S16 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vqrdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQRDMULH. S32 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<Int32>, Vector64<Int32>, Byte) |
int32x4_t vqrdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQRDMULH. S32 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<Int16>, Vector128<Int16>, Byte) |
int16x4_t vqrdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQRDMULH. S16 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vqrdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQRDMULH. S16 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<Int32>, Vector128<Int32>, Byte) |
int32x2_t vqrdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQRDMULH. S32 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vqrdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQRDMULH. S32 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyRoundedDoublingSaturateHigh(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqrdmulhq_s16 (int16x8_t a, int16x8_t b) A32: VQRDMULH. S16 Qd, Qn, Qm A64: SQRDMULH Vd.8H, Vn.8H, Vm.8H |
MultiplyRoundedDoublingSaturateHigh(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqrdmulhq_s32 (int32x4_t a, int32x4_t b) A32: VQRDMULH. S32 Qd, Qn, Qm A64: SQRDMULH Vd.4S, Vn.4S, Vm.4S |
MultiplyRoundedDoublingSaturateHigh(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqrdmulh_s16 (int16x4_t a, int16x4_t b) A32: VQRDMULH. S16 Dd, Dn, Dm A64: SQRDMULH Vd.4H, Vn.4H, Vm.4H |
MultiplyRoundedDoublingSaturateHigh(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqrdmulh_s32 (int32x2_t a, int32x2_t b) A32: VQRDMULH. S32 Dd, Dn, Dm A64: SQRDMULH Vd.2S, Vn.2S, Vm.2S |
MultiplyScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vmul_f64 (float64x1_t a, float64x1_t b) A32: VMUL. F64 Dd, Dn, Dm A64: FMUL Dd, Dn, Dm |
MultiplyScalar(Vector64<Single>, Vector64<Single>) |
float32_t vmuls_f32 (float32_t a, float32_t b) A32: VMUL. F32 Sd, Sn, Sm A64: FMUL Sd, Sn, Sm The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
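Scalar overloads such as MultiplyScalar perform the operation on the first element of each vector, which keeps scalar math in SIMD registers. A minimal sketch (same using directives as the Max sketch above; do not rely on the upper elements of the result):

Vector64<float> x = Vector64.CreateScalar(3.0f);
Vector64<float> y = Vector64.CreateScalar(1.5f);
// FMUL Sd, Sn, Sm: multiplies only the first Single of each vector.
float product = AdvSimd.MultiplyScalar(x, y).ToScalar(); // 4.5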
MultiplyScalarBySelectedScalar(Vector64<Single>, Vector128<Single>, Byte) |
float32_t vmuls_laneq_f32 (float32_t a, float32x4_t v, const int lane) A32: VMUL. F32 Sd, Sn, Dm[lane] A64: FMUL Sd, Sn, Vm.S[lane] |
MultiplyScalarBySelectedScalar(Vector64<Single>, Vector64<Single>, Byte) |
float32_t vmuls_lane_f32 (float32_t a, float32x2_t v, const int lane) A32: VMUL. F32 Sd, Sn, Dm[lane] A64: FMUL Sd, Sn, Vm.S[lane] |
MultiplySubtract(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) A32: VMLS. I8 Qd, Qn, Qm A64: MLS Vd.16B, Vn.16B, Vm.16B |
MultiplySubtract(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>) |
int16x8_t vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) A32: VMLS. I16 Qd, Qn, Qm A64: MLS Vd.8H, Vn.8H, Vm.8H |
MultiplySubtract(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>) |
int32x4_t vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) A32: VMLS. I32 Qd, Qn, Qm A64: MLS Vd.4S, Vn.4S, Vm.4S |
MultiplySubtract(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>) |
int8x16_t vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) A32: VMLS. I8 Qd, Qn, Qm A64: MLS Vd.16B, Vn.16B, Vm.16B |
MultiplySubtract(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) A32: VMLS. I16 Qd, Qn, Qm A64: MLS Vd.8H, Vn.8H, Vm.8H |
MultiplySubtract(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vmlsq_u32 (a, uint32x4_t b, uint32x4_t c uint32x4_t) A32: VMLS. I32 Qd, Qn, Qm A64: MLS Vd.4S, Vn.4S, Vm.4S |
MultiplySubtract(Vector64<Byte>, Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vmls_u8 (a, uint8x8_t b, uint8x8_t c uint8x8_t) A32: VMLS. I8 Dd, Dn, Dm A64: MLS Vd.8B, Vn.8B, Vm.8B |
MultiplySubtract(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmls_s16 (a, int16x4_t b, int16x4_t c int16x4_t) A32: VMLS. I16 Dd, Dn, Dm A64: MLS Vd.4H, Vn.4H, Vm.4H |
MultiplySubtract(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmls_s32 (a, int32x2_t b, int32x2_t c int32x2_t) A32: VMLS. I32 Dd, Dn, Dm A64: MLS Vd.2S, Vn.2S, Vm.2S |
MultiplySubtract(Vector64<SByte>, Vector64<SByte>, Vector64<SByte>) |
int8x8_t vmls_s8 (a, int8x8_t b, int8x8_t c int8x8_t) A32: VMLS. I8 Dd, Dn, Dm A64: MLS Vd.8B, Vn.8B, Vm.8B |
MultiplySubtract(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmls_u16 (a, uint16x4_t b, uint16x4_t c uint16x4_t) A32: VMLS. I16 Dd, Dn, Dm A64: MLS Vd.4H, Vn.4H, Vm.4H |
MultiplySubtract(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmls_u32 (a, uint32x2_t b, uint32x2_t c uint32x2_t) A32: VMLS. I32 Dd, Dn, Dm A64: MLS Vd.2S, Vn.2S, Vm.2S |
MultiplySubtractByScalar(Vector128<Int16>, Vector128<Int16>, Vector64<Int16>) |
int16x8_t vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) A32: VMLS. I16 Qd, Qn, Dm[0] A64: MLS Vd.8H, Vn.8H, Vm.H[0] |
MultiplySubtractByScalar(Vector128<Int32>, Vector128<Int32>, Vector64<Int32>) |
int32x4_t vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) A32: VMLS. I32 Qd, Qn, Dm[0] A64: MLS Vd.4S, Vn.4S, Vm.S[0] |
MultiplySubtractByScalar(Vector128<UInt16>, Vector128<UInt16>, Vector64<UInt16>) |
uint16x8_t vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) A32: VMLS. I16 Qd, Qn, Dm[0] A64: MLS Vd.8H, Vn.8H, Vm.H[0] |
MultiplySubtractByScalar(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>) |
uint32x4_t vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) A32: VMLS. I32 Qd, Qn, Dm[0] A64: MLS Vd.4S, Vn.4S, Vm.S[0] |
MultiplySubtractByScalar(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>) |
int16x4_t vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c) A32: VMLS. I16 Dd, Dn, Dm[0] A64: MLS Vd.4H, Vn.4H, Vm.H[0] |
MultiplySubtractByScalar(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>) |
int32x2_t vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c) A32: VMLS. I32 Dd, Dn, Dm[0] A64: MLS Vd.2S, Vn.2S, Vm.S[0] |
MultiplySubtractByScalar(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) A32: VMLS. I16 Dd, Dn, Dm[0] A64: MLS Vd.4H, Vn.4H, Vm.H[0] |
MultiplySubtractByScalar(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) A32: VMLS. I32 Dd, Dn, Dm[0] A64: MLS Vd.2S, Vn.2S, Vm.S[0] |
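Usage sketch for the multiply-subtract family: per lane, MLS computes minuend - (left * right) with ordinary wrapping integer arithmetic. Names below are illustrative assumptions, not taken from this reference.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class MlsExample
{
    // result[i] = minuend[i] - left[i] * right[i]
    static Vector128<int> MultiplySubtractInt32(Vector128<int> minuend, Vector128<int> left, Vector128<int> right)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        return AdvSimd.MultiplySubtract(minuend, left, right);
    }
}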
MultiplySubtractBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vmlsq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane) A32: VMLS. I16 Qd, Qn, Dm[lane] A64: MLS Vd.8H, Vn.8H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector128<Int16>, Vector128<Int16>, Vector64<Int16>, Byte) |
int16x8_t vmlsq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane) A32: VMLS. I16 Qd, Qn, Dm[lane] A64: MLS Vd.8H, Vn.8H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vmlsq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane) A32: VMLS. I32 Qd, Qn, Dm[lane] A64: MLS Vd.4S, Vn.4S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector128<Int32>, Vector128<Int32>, Vector64<Int32>, Byte) |
int32x4_t vmlsq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane) A32: VMLS. I32 Qd, Qn, Dm[lane] A64: MLS Vd.4S, Vn.4S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vmlsq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane) A32: VMLS. I16 Qd, Qn, Dm[lane] A64: MLS Vd.8H, Vn.8H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector128<UInt16>, Vector128<UInt16>, Vector64<UInt16>, Byte) |
uint16x8_t vmlsq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane) A32: VMLS. I16 Qd, Qn, Dm[lane] A64: MLS Vd.8H, Vn.8H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vmlsq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane) A32: VMLS. I32 Qd, Qn, Dm[lane] A64: MLS Vd.4S, Vn.4S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>, Byte) |
uint32x4_t vmlsq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane) A32: VMLS. I32 Qd, Qn, Dm[lane] A64: MLS Vd.4S, Vn.4S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Vector128<Int16>, Byte) |
int16x4_t vmls_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VMLS. I16 Dd, Dn, Dm[lane] A64: MLS Vd.4H, Vn.4H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector64<Int16>, Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vmls_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VMLS. I16 Dd, Dn, Dm[lane] A64: MLS Vd.4H, Vn.4H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Vector128<Int32>, Byte) |
int32x2_t vmls_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VMLS. I32 Dd, Dn, Dm[lane] A64: MLS Vd.2S, Vn.2S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector64<Int32>, Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vmls_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VMLS. I32 Dd, Dn, Dm[lane] A64: MLS Vd.2S, Vn.2S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector64<UInt16>, Vector64<UInt16>, Vector128<UInt16>, Byte) |
uint16x4_t vmls_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane) A32: VMLS. I16 Dd, Dn, Dm[lane] A64: MLS Vd.4H, Vn.4H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector64<UInt16>, Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vmls_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane) A32: VMLS. I16 Dd, Dn, Dm[lane] A64: MLS Vd.4H, Vn.4H, Vm.H[lane] |
MultiplySubtractBySelectedScalar(Vector64<UInt32>, Vector64<UInt32>, Vector128<UInt32>, Byte) |
uint32x2_t vmls_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane) A32: VMLS. I32 Dd, Dn, Dm[lane] A64: MLS Vd.2S, Vn.2S, Vm.S[lane] |
MultiplySubtractBySelectedScalar(Vector64<UInt32>, Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vmls_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane) A32: VMLS. I32 Dd, Dn, Dm[lane] A64: MLS Vd.2S, Vn.2S, Vm.S[lane] |
MultiplyWideningLower(Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vmull_u8 (uint8x8_t a, uint8x8_t b) A32: VMULL. U8 Qd, Dn, Dm A64: UMULL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLower(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vmull_s16 (int16x4_t a, int16x4_t b) A32: VMULL. S16 Qd, Dn, Dm A64: SMULL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLower(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vmull_s32 (int32x2_t a, int32x2_t b) A32: VMULL. S32 Qd, Dn, Dm A64: SMULL Vd.2D, Vn.2S, Vm.2S |
MultiplyWideningLower(Vector64<SByte>, Vector64<SByte>) |
int16x8_t vmull_s8 (int8x8_t a, int8x8_t b) A32: VMULL. S8 Qd, Dn, Dm A64: SMULL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLower(Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vmull_u16 (uint16x4_t a, uint16x4_t b) A32: VMULL. U16 Qd, Dn, Dm A64: UMULL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLower(Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vmull_u32 (uint32x2_t a, uint32x2_t b) A32: VMULL. U32 Qd, Dn, Dm A64: UMULL Vd.2D, Vn.2S, Vm.2S |
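Usage sketch: the widening multiplies produce full-width products, so 16-bit inputs cannot overflow the result lanes. Illustrative helper only.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class WideningMultiplyExample
{
    // SMULL: four Int16 * Int16 products, each kept as a full Int32.
    static Vector128<int> WideMultiply(Vector64<short> left, Vector64<short> right)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        return AdvSimd.MultiplyWideningLower(left, right);
    }
}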
MultiplyWideningLowerAndAdd(Vector128<Int16>, Vector64<SByte>, Vector64<SByte>) |
int16x8_t vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) A32: VMLAL. S8 Qd, Dn, Dm A64: SMLAL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLowerAndAdd(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VMLAL. S16 Qd, Dn, Dm A64: SMLAL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLowerAndAdd(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VMLAL. S32 Qd, Dn, Dm A64: SMLAL Vd.2D, Vn.2S, Vm.2S |
MultiplyWideningLowerAndAdd(Vector128<UInt16>, Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) A32: VMLAL. U8 Qd, Dn, Dm A64: UMLAL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLowerAndAdd(Vector128<UInt32>, Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) A32: VMLAL. U16 Qd, Dn, Dm A64: UMLAL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLowerAndAdd(Vector128<UInt64>, Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) A32: VMLAL. U32 Qd, Dn, Dm A64: UMLAL Vd.2D, Vn.2S, Vm.2S |
MultiplyWideningLowerAndSubtract(Vector128<Int16>, Vector64<SByte>, Vector64<SByte>) |
int16x8_t vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c) A32: VMLSL. S8 Qd, Dn, Dm A64: SMLSL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLowerAndSubtract(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>) |
int32x4_t vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VMLSL. S16 Qd, Dn, Dm A64: SMLSL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLowerAndSubtract(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>) |
int64x2_t vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VMLSL. S32 Qd, Dn, Dm A64: SMLSL Vd.2D, Vn.2S, Vm.2S |
MultiplyWideningLowerAndSubtract(Vector128<UInt16>, Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) A32: VMLSL. U8 Qd, Dn, Dm A64: UMLSL Vd.8H, Vn.8B, Vm.8B |
MultiplyWideningLowerAndSubtract(Vector128<UInt32>, Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) A32: VMLSL. U16 Qd, Dn, Dm A64: UMLSL Vd.4S, Vn.4H, Vm.4H |
MultiplyWideningLowerAndSubtract(Vector128<UInt64>, Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) A32: VMLSL. U32 Qd, Dn, Dm A64: UMLSL Vd.2D, Vn.2S, Vm.2S |
MultiplyWideningUpper(Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vmull_high_u8 (uint8x16_t a, uint8x16_t b) A32: VMULL. U8 Qd, Dn+1, Dm+1 A64: UMULL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpper(Vector128<Int16>, Vector128<Int16>) |
int32x4_t vmull_high_s16 (int16x8_t a, int16x8_t b) A32: VMULL. S16 Qd, Dn+1, Dm+1 A64: SMULL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpper(Vector128<Int32>, Vector128<Int32>) |
int64x2_t vmull_high_s32 (int32x4_t a, int32x4_t b) A32: VMULL. S32 Qd, Dn+1, Dm+1 A64: SMULL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyWideningUpper(Vector128<SByte>, Vector128<SByte>) |
int16x8_t vmull_high_s8 (int8x16_t a, int8x16_t b) A32: VMULL. S8 Qd, Dn+1, Dm+1 A64: SMULL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpper(Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vmull_high_u16 (uint16x8_t a, uint16x8_t b) A32: VMULL. U16 Qd, Dn+1, Dm+1 A64: UMULL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpper(Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vmull_high_u32 (uint32x4_t a, uint32x4_t b) A32: VMULL. U32 Qd, Dn+1, Dm+1 A64: UMULL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyWideningUpperAndAdd(Vector128<Int16>, Vector128<SByte>, Vector128<SByte>) |
int16x8_t vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) A32: VMLAL. S8 Qd, Dn+1, Dm+1 A64: SMLAL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpperAndAdd(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>) |
int32x4_t vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VMLAL. S16 Qd, Dn+1, Dm+1 A64: SMLAL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpperAndAdd(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>) |
int64x2_t vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VMLAL. S32 Qd, Dn+1, Dm+1 A64: SMLAL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyWideningUpperAndAdd(Vector128<UInt16>, Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) A32: VMLAL. U8 Qd, Dn+1, Dm+1 A64: UMLAL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpperAndAdd(Vector128<UInt32>, Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) A32: VMLAL. U16 Qd, Dn+1, Dm+1 A64: UMLAL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpperAndAdd(Vector128<UInt64>, Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) A32: VMLAL. U32 Qd, Dn+1, Dm+1 A64: UMLAL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyWideningUpperAndSubtract(Vector128<Int16>, Vector128<SByte>, Vector128<SByte>) |
int16x8_t vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) A32: VMLSL. S8 Qd, Dn+1, Dm+1 A64: SMLSL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpperAndSubtract(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>) |
int32x4_t vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VMLSL. S16 Qd, Dn+1, Dm+1 A64: SMLSL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpperAndSubtract(Vector128<Int64>, Vector128<Int32>, Vector128<Int32>) |
int64x2_t vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VMLSL. S32 Qd, Dn+1, Dm+1 A64: SMLSL2 Vd.2D, Vn.4S, Vm.4S |
MultiplyWideningUpperAndSubtract(Vector128<UInt16>, Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) A32: VMLSL. U8 Qd, Dn+1, Dm+1 A64: UMLSL2 Vd.8H, Vn.16B, Vm.16B |
MultiplyWideningUpperAndSubtract(Vector128<UInt32>, Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) A32: VMLSL. U16 Qd, Dn+1, Dm+1 A64: UMLSL2 Vd.4S, Vn.8H, Vm.8H |
MultiplyWideningUpperAndSubtract(Vector128<UInt64>, Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) A32: VMLSL. U32 Qd, Dn+1, Dm+1 A64: UMLSL2 Vd.2D, Vn.4S, Vm.4S |
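Usage sketch combining the lower and upper widening multiply-accumulate overloads so that all eight Int16 lane pairs of two Vector128 inputs are accumulated into four Int32 partial sums. Names are illustrative; GetLower is the extension method from System.Runtime.Intrinsics.Vector128.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class WideningMacExample
{
    static Vector128<int> MultiplyAccumulate(Vector128<int> acc, Vector128<short> a, Vector128<short> b)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        acc = AdvSimd.MultiplyWideningLowerAndAdd(acc, a.GetLower(), b.GetLower()); // SMLAL: lanes 0-3
        acc = AdvSimd.MultiplyWideningUpperAndAdd(acc, a, b);                       // SMLAL2: lanes 4-7
        return acc;
    }
}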
Negate(Vector128<Int16>) |
int16x8_t vnegq_s16 (int16x8_t a) A32: VNEG. S16 Qd, Qm A64: NEG Vd.8H, Vn.8H |
Negate(Vector128<Int32>) |
int32x4_t vnegq_s32 (int32x4_t a) A32: VNEG. S32 Qd, Qm A64: NEG Vd.4S, Vn.4S |
Negate(Vector128<SByte>) |
int8x16_t vnegq_s8 (int8x16_t a) A32: VNEG. S8 Qd, Qm A64: NEG Vd.16B, Vn.16B |
Negate(Vector128<Single>) |
float32x4_t vnegq_f32 (float32x4_t a) A32: VNEG. F32 Qd, Qm A64: FNEG Vd.4S, Vn.4S |
Negate(Vector64<Int16>) |
int16x4_t vneg_s16 (int16x4_t a) A32: VNEG. S16 Dd, Dm A64: NEG Vd.4H, Vn.4H |
Negate(Vector64<Int32>) |
int32x2_t vneg_s32 (int32x2_t a) A32: VNEG. S32 Dd, Dm A64: NEG Vd.2S, Vn.2S |
Negate(Vector64<SByte>) |
int8x8_t vneg_s8 (int8x8_t a) A32: VNEG. S8 Dd, Dm A64: NEG Vd.8B, Vn.8B |
Negate(Vector64<Single>) |
float32x2_t vneg_f32 (float32x2_t a) A32: VNEG. F32 Dd, Dm A64: FNEG Vd.2S, Vn.2S |
NegateSaturate(Vector128<Int16>) |
int16x8_t vqnegq_s16 (int16x8_t a) A32: VQNEG. S16 Qd, Qm A64: SQNEG Vd.8H, Vn.8H |
NegateSaturate(Vector128<Int32>) |
int32x4_t vqnegq_s32 (int32x4_t a) A32: VQNEG. S32 Qd, Qm A64: SQNEG Vd.4S, Vn.4S |
NegateSaturate(Vector128<SByte>) |
int8x16_t vqnegq_s8 (int8x16_t a) A32: VQNEG. S8 Qd, Qm A64: SQNEG Vd.16B, Vn.16B |
NegateSaturate(Vector64<Int16>) |
int16x4_t vqneg_s16 (int16x4_t a) A32: VQNEG. S16 Dd, Dm A64: SQNEG Vd.4H, Vn.4H |
NegateSaturate(Vector64<Int32>) |
int32x2_t vqneg_s32 (int32x2_t a) A32: VQNEG. S32 Dd, Dm A64: SQNEG Vd.2S, Vn.2S |
NegateSaturate(Vector64<SByte>) |
int8x8_t vqneg_s8 (int8x8_t a) A32: VQNEG. S8 Dd, Dm A64: SQNEG Vd.8B, Vn.8B |
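Usage sketch contrasting Negate and NegateSaturate on the Int16 minimum value; illustrative only.
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class NegateExample
{
    static void Demo()
    {
        if (!AdvSimd.IsSupported) return;
        Vector128<short> v = Vector128.Create(short.MinValue);
        Vector128<short> wrapped = AdvSimd.Negate(v);         // NEG wraps: -(-32768) stays -32768
        Vector128<short> clamped = AdvSimd.NegateSaturate(v); // SQNEG saturates: result is +32767
    }
}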
NegateScalar(Vector64<Double>) |
float64x1_t vneg_f64 (float64x1_t a) A32: VNEG. F64 Dd, Dm A64: FNEG Dd, Dn |
NegateScalar(Vector64<Single>) |
float32_t vnegs_f32 (float32_t a) A32: VNEG. F32 Sd, Sm A64: FNEG Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Not(Vector128<Byte>) |
uint8x16_t vmvnq_u8 (uint8x16_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<Double>) |
float64x2_t vmvnq_f64 (float64x2_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Not(Vector128<Int16>) |
int16x8_t vmvnq_s16 (int16x8_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<Int32>) |
int32x4_t vmvnq_s32 (int32x4_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<Int64>) |
int64x2_t vmvnq_s64 (int64x2_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<SByte>) |
int8x16_t vmvnq_s8 (int8x16_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<Single>) |
float32x4_t vmvnq_f32 (float32x4_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Not(Vector128<UInt16>) |
uint16x8_t vmvnq_u16 (uint16x8_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<UInt32>) |
uint32x4_t vmvnq_u32 (uint32x4_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector128<UInt64>) |
uint64x2_t vmvnq_u64 (uint64x2_t a) A32: VMVN Qd, Qm A64: MVN Vd.16B, Vn.16B |
Not(Vector64<Byte>) |
uint8x8_t vmvn_u8 (uint8x8_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<Double>) |
float64x1_t vmvn_f64 (float64x1_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Not(Vector64<Int16>) |
int16x4_t vmvn_s16 (int16x4_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<Int32>) |
int32x2_t vmvn_s32 (int32x2_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<Int64>) |
int64x1_t vmvn_s64 (int64x1_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<SByte>) |
int8x8_t vmvn_s8 (int8x8_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<Single>) |
float32x2_t vmvn_f32 (float32x2_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Not(Vector64<UInt16>) |
uint16x4_t vmvn_u16 (uint16x4_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<UInt32>) |
uint32x2_t vmvn_u32 (uint32x2_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Not(Vector64<UInt64>) |
uint64x1_t vmvn_u64 (uint64x1_t a) A32: VMVN Dd, Dm A64: MVN Vd.8B, Vn.8B |
Or(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vorrq_u8 (uint8x16_t a, uint8x16_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<Double>, Vector128<Double>) |
float64x2_t vorrq_f64 (float64x2_t a, float64x2_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Or(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vorrq_s16 (int16x8_t a, int16x8_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vorrq_s32 (int32x4_t a, int32x4_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vorrq_s64 (int64x2_t a, int64x2_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vorrq_s8 (int8x16_t a, int8x16_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<Single>, Vector128<Single>) |
float32x4_t vorrq_f32 (float32x4_t a, float32x4_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Or(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vorrq_u16 (uint16x8_t a, uint16x8_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vorrq_u32 (uint32x4_t a, uint32x4_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vorrq_u64 (uint64x2_t a, uint64x2_t b) A32: VORR Qd, Qn, Qm A64: ORR Vd.16B, Vn.16B, Vm.16B |
Or(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vorr_u8 (uint8x8_t a, uint8x8_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<Double>, Vector64<Double>) |
float64x1_t vorr_f64 (float64x1_t a, float64x1_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Or(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vorr_s16 (int16x4_t a, int16x4_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vorr_s32 (int32x2_t a, int32x2_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vorr_s64 (int64x1_t a, int64x1_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vorr_s8 (int8x8_t a, int8x8_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<Single>, Vector64<Single>) |
float32x2_t vorr_f32 (float32x2_t a, float32x2_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Or(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vorr_u16 (uint16x4_t a, uint16x4_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vorr_u32 (uint32x2_t a, uint32x2_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
Or(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vorr_u64 (uint64x1_t a, uint64x1_t b) A32: VORR Dd, Dn, Dm A64: ORR Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vornq_u8 (uint8x16_t a, uint8x16_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<Double>, Vector128<Double>) |
float64x2_t vornq_f64 (float64x2_t a, float64x2_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
OrNot(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vornq_s16 (int16x8_t a, int16x8_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vornq_s32 (int32x4_t a, int32x4_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vornq_s64 (int64x2_t a, int64x2_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vornq_s8 (int8x16_t a, int8x16_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<Single>, Vector128<Single>) |
float32x4_t vornq_f32 (float32x4_t a, float32x4_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
OrNot(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vornq_u16 (uint16x8_t a, uint16x8_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vornq_u32 (uint32x4_t a, uint32x4_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vornq_u64 (uint64x2_t a, uint64x2_t b) A32: VORN Qd, Qn, Qm A64: ORN Vd.16B, Vn.16B, Vm.16B |
OrNot(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vorn_u8 (uint8x8_t a, uint8x8_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<Double>, Vector64<Double>) |
float64x1_t vorn_f64 (float64x1_t a, float64x1_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
OrNot(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vorn_s16 (int16x4_t a, int16x4_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vorn_s32 (int32x2_t a, int32x2_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vorn_s64 (int64x1_t a, int64x1_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vorn_s8 (int8x8_t a, int8x8_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<Single>, Vector64<Single>) |
float32x2_t vorn_f32 (float32x2_t a, float32x2_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
OrNot(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vorn_u16 (uint16x4_t a, uint16x4_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vorn_u32 (uint32x2_t a, uint32x2_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
OrNot(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vorn_u64 (uint64x1_t a, uint64x1_t b) A32: VORN Dd, Dn, Dm A64: ORN Vd.8B, Vn.8B, Vm.8B |
PolynomialMultiply(Vector128<Byte>, Vector128<Byte>) |
poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b) A32: VMUL. P8 Qd, Qn, Qm A64: PMUL Vd.16B, Vn.16B, Vm.16B |
PolynomialMultiply(Vector128<SByte>, Vector128<SByte>) |
poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b) A32: VMUL. P8 Qd, Qn, Qm A64: PMUL Vd.16B, Vn.16B, Vm.16B |
PolynomialMultiply(Vector64<Byte>, Vector64<Byte>) |
poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b) A32: VMUL. P8 Dd, Dn, Dm A64: PMUL Vd.8B, Vn.8B, Vm.8B |
PolynomialMultiply(Vector64<SByte>, Vector64<SByte>) |
poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b) A32: VMUL. P8 Dd, Dn, Dm A64: PMUL Vd.8B, Vn.8B, Vm.8B |
PolynomialMultiplyWideningLower(Vector64<Byte>, Vector64<Byte>) |
poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b) A32: VMULL. P8 Qd, Dn, Dm A64: PMULL Vd.16B, Vn.8B, Vm.8B |
PolynomialMultiplyWideningLower(Vector64<SByte>, Vector64<SByte>) |
poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b) A32: VMULL. P8 Qd, Dn, Dm A64: PMULL Vd.16B, Vn.8B, Vm.8B |
PolynomialMultiplyWideningUpper(Vector128<Byte>, Vector128<Byte>) |
poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b) A32: VMULL. P8 Qd, Dn+1, Dm+1 A64: PMULL2 Vd.16B, Vn.16B, Vm.16B |
PolynomialMultiplyWideningUpper(Vector128<SByte>, Vector128<SByte>) |
poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b) A32: VMULL. P8 Qd, Dn+1, Dm+1 A64: PMULL2 Vd.16B, Vn.16B, Vm.16B |
PopCount(Vector128<Byte>) |
uint8x16_t vcntq_u8 (uint8x16_t a) A32: VCNT. I8 Qd, Qm A64: CNT Vd.16B, Vn.16B |
PopCount(Vector128<SByte>) |
int8x16_t vcntq_s8 (int8x16_t a) A32: VCNT. I8 Qd, Qm A64: CNT Vd.16B, Vn.16B |
PopCount(Vector64<Byte>) |
uint8x8_t vcnt_u8 (uint8x8_t a) A32: VCNT. I8 Dd, Dm A64: CNT Vd.8B, Vn.8B |
PopCount(Vector64<SByte>) |
int8x8_t vcnt_s8 (int8x8_t a) A32: VCNT. I8 Dd, Dm A64: CNT Vd.8B, Vn.8B |
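Usage sketch: PopCount returns the number of set bits in each byte lane (0 through 8); reducing those per-byte counts to a single total is left to the caller. Illustrative helper only.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class PopCountExample
{
    // CNT: per-byte population count.
    static Vector128<byte> PerBytePopCount(Vector128<byte> value)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        return AdvSimd.PopCount(value);
    }
}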
ReciprocalEstimate(Vector128<Single>) |
float32x4_t vrecpeq_f32 (float32x4_t a) A32: VRECPE. F32 Qd, Qm A64: FRECPE Vd.4S, Vn.4S |
ReciprocalEstimate(Vector128<UInt32>) |
uint32x4_t vrecpeq_u32 (uint32x4_t a) A32: VRECPE. U32 Qd, Qm A64: URECPE Vd.4S, Vn.4S |
ReciprocalEstimate(Vector64<Single>) |
float32x2_t vrecpe_f32 (float32x2_t a) A32: VRECPE. F32 Dd, Dm A64: FRECPE Vd.2S, Vn.2S |
ReciprocalEstimate(Vector64<UInt32>) |
uint32x2_t vrecpe_u32 (uint32x2_t a) A32: VRECPE. U32 Dd, Dm A64: URECPE Vd.2S, Vn.2S |
ReciprocalSquareRootEstimate(Vector128<Single>) |
float32x4_t vrsqrteq_f32 (float32x4_t a) A32: VRSQRTE. F32 Qd, Qm A64: FRSQRTE Vd.4S, Vn.4S |
ReciprocalSquareRootEstimate(Vector128<UInt32>) |
uint32x4_t vrsqrteq_u32 (uint32x4_t a) A32: VRSQRTE. U32 Qd, Qm A64: URSQRTE Vd.4S, Vn.4S |
ReciprocalSquareRootEstimate(Vector64<Single>) |
float32x2_t vrsqrte_f32 (float32x2_t a) A32: VRSQRTE. F32 Dd, Dm A64: FRSQRTE Vd.2S, Vn.2S |
ReciprocalSquareRootEstimate(Vector64<UInt32>) |
uint32x2_t vrsqrte_u32 (uint32x2_t a) A32: VRSQRTE. U32 Dd, Dm A64: URSQRTE Vd.2S, Vn.2S |
ReciprocalSquareRootStep(Vector128<Single>, Vector128<Single>) |
float32x4_t vrsqrtsq_f32 (float32x4_t a, float32x4_t b) A32: VRSQRTS. F32 Qd, Qn, Qm A64: FRSQRTS Vd.4S, Vn.4S, Vm.4S |
ReciprocalSquareRootStep(Vector64<Single>, Vector64<Single>) |
float32x2_t vrsqrts_f32 (float32x2_t a, float32x2_t b) A32: VRSQRTS. F32 Dd, Dn, Dm A64: FRSQRTS Vd.2S, Vn.2S, Vm.2S |
ReciprocalStep(Vector128<Single>, Vector128<Single>) |
float32x4_t vrecpsq_f32 (float32x4_t a, float32x4_t b) A32: VRECPS. F32 Qd, Qn, Qm A64: FRECPS Vd.4S, Vn.4S, Vm.4S |
ReciprocalStep(Vector64<Single>, Vector64<Single>) |
float32x2_t vrecps_f32 (float32x2_t a, float32x2_t b) A32: VRECPS. F32 Dd, Dn, Dm A64: FRECPS Vd.2S, Vn.2S, Vm.2S |
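Usage sketch: ReciprocalEstimate plus ReciprocalStep give the usual Newton-Raphson refinement for an approximate per-lane reciprocal; two refinement steps here are a choice of the example, not a requirement of the API.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class ReciprocalExample
{
    static Vector128<float> ApproximateReciprocal(Vector128<float> x)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        Vector128<float> est = AdvSimd.ReciprocalEstimate(x);        // FRECPE: rough 1/x estimate
        est = AdvSimd.Multiply(est, AdvSimd.ReciprocalStep(x, est)); // FRECPS: est *= (2 - x * est)
        est = AdvSimd.Multiply(est, AdvSimd.ReciprocalStep(x, est)); // second refinement
        return est;
    }
}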
ReverseElement16(Vector128<Int32>) |
int16x8_t vrev32q_s16 (int16x8_t vec) A32: VREV32.16 Qd, Qm A64: REV32 Vd.8H, Vn.8H |
ReverseElement16(Vector128<Int64>) |
int16x8_t vrev64q_s16 (int16x8_t vec) A32: VREV64.16 Qd, Qm A64: REV64 Vd.8H, Vn.8H |
ReverseElement16(Vector128<UInt32>) |
uint16x8_t vrev32q_u16 (uint16x8_t vec) A32: VREV32.16 Qd, Qm A64: REV32 Vd.8H, Vn.8H |
ReverseElement16(Vector128<UInt64>) |
uint16x8_t vrev64q_u16 (uint16x8_t vec) A32: VREV64.16 Qd, Qm A64: REV64 Vd.8H, Vn.8H |
ReverseElement16(Vector64<Int32>) |
int16x4_t vrev32_s16 (int16x4_t vec) A32: VREV32.16 Dd, Dm A64: REV32 Vd.4H, Vn.4H |
ReverseElement16(Vector64<Int64>) |
int16x4_t vrev64_s16 (int16x4_t vec) A32: VREV64.16 Dd, Dm A64: REV64 Vd.4H, Vn.4H |
ReverseElement16(Vector64<UInt32>) |
uint16x4_t vrev32_u16 (uint16x4_t vec) A32: VREV32.16 Dd, Dm A64: REV32 Vd.4H, Vn.4H |
ReverseElement16(Vector64<UInt64>) |
uint16x4_t vrev64_u16 (uint16x4_t vec) A32: VREV64.16 Dd, Dm A64: REV64 Vd.4H, Vn.4H |
ReverseElement32(Vector128<Int64>) |
int32x4_t vrev64q_s32 (int32x4_t vec) A32: VREV64.32 Qd, Qm A64: REV64 Vd.4S, Vn.4S |
ReverseElement32(Vector128<UInt64>) |
uint32x4_t vrev64q_u32 (uint32x4_t vec) A32: VREV64.32 Qd, Qm A64: REV64 Vd.4S, Vn.4S |
ReverseElement32(Vector64<Int64>) |
int32x2_t vrev64_s32 (int32x2_t vec) A32: VREV64.32 Dd, Dm A64: REV64 Vd.2S, Vn.2S |
ReverseElement32(Vector64<UInt64>) |
uint32x2_t vrev64_u32 (uint32x2_t vec) A32: VREV64.32 Dd, Dm A64: REV64 Vd.2S, Vn.2S |
ReverseElement8(Vector128<Int16>) |
int8x16_t vrev16q_s8 (int8x16_t vec) A32: VREV16.8 Qd, Qm A64: REV16 Vd.16B, Vn.16B |
ReverseElement8(Vector128<Int32>) |
int8x16_t vrev32q_s8 (int8x16_t vec) A32: VREV32.8 Qd, Qm A64: REV32 Vd.16B, Vn.16B |
ReverseElement8(Vector128<Int64>) |
int8x16_t vrev64q_s8 (int8x16_t vec) A32: VREV64.8 Qd, Qm A64: REV64 Vd.16B, Vn.16B |
ReverseElement8(Vector128<UInt16>) |
uint8x16_t vrev16q_u8 (uint8x16_t vec) A32: VREV16.8 Qd, Qm A64: REV16 Vd.16B, Vn.16B |
ReverseElement8(Vector128<UInt32>) |
uint8x16_t vrev32q_u8 (uint8x16_t vec) A32: VREV32.8 Qd, Qm A64: REV32 Vd.16B, Vn.16B |
ReverseElement8(Vector128<UInt64>) |
uint8x16_t vrev64q_u8 (uint8x16_t vec) A32: VREV64.8 Qd, Qm A64: REV64 Vd.16B, Vn.16B |
ReverseElement8(Vector64<Int16>) |
int8x8_t vrev16_s8 (int8x8_t vec) A32: VREV16.8 Dd, Dm A64: REV16 Vd.8B, Vn.8B |
ReverseElement8(Vector64<Int32>) |
int8x8_t vrev32_s8 (int8x8_t vec) A32: VREV32.8 Dd, Dm A64: REV32 Vd.8B, Vn.8B |
ReverseElement8(Vector64<Int64>) |
int8x8_t vrev64_s8 (int8x8_t vec) A32: VREV64.8 Dd, Dm A64: REV64 Vd.8B, Vn.8B |
ReverseElement8(Vector64<UInt16>) |
uint8x8_t vrev16_u8 (uint8x8_t vec) A32: VREV16.8 Dd, Dm A64: REV16 Vd.8B, Vn.8B |
ReverseElement8(Vector64<UInt32>) |
uint8x8_t vrev32_u8 (uint8x8_t vec) A32: VREV32.8 Dd, Dm A64: REV32 Vd.8B, Vn.8B |
ReverseElement8(Vector64<UInt64>) |
uint8x8_t vrev64_u8 (uint8x8_t vec) A32: VREV64.8 Dd, Dm A64: REV64 Vd.8B, Vn.8B |
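Usage sketch: ReverseElement8 over 32-bit elements swaps the byte order inside every UInt32 lane, i.e. a vectorized endianness conversion. Illustrative only.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class ByteSwapExample
{
    // REV32: reverse bytes within each 32-bit element.
    static Vector128<uint> SwapEndianness(Vector128<uint> value)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        return AdvSimd.ReverseElement8(value);
    }
}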
RoundAwayFromZero(Vector128<Single>) |
float32x4_t vrndaq_f32 (float32x4_t a) A32: VRINTA. F32 Qd, Qm A64: FRINTA Vd.4S, Vn.4S |
RoundAwayFromZero(Vector64<Single>) |
float32x2_t vrnda_f32 (float32x2_t a) A32: VRINTA. F32 Dd, Dm A64: FRINTA Vd.2S, Vn.2S |
RoundAwayFromZeroScalar(Vector64<Double>) |
float64x1_t vrnda_f64 (float64x1_t a) A32: VRINTA. F64 Dd, Dm A64: FRINTA Dd, Dn |
RoundAwayFromZeroScalar(Vector64<Single>) |
float32_t vrndas_f32 (float32_t a) A32: VRINTA. F32 Sd, Sm A64: FRINTA Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
RoundToNearest(Vector128<Single>) |
float32x4_t vrndnq_f32 (float32x4_t a) A32: VRINTN. F32 Qd, Qm A64: FRINTN Vd.4S, Vn.4S |
RoundToNearest(Vector64<Single>) |
float32x2_t vrndn_f32 (float32x2_t a) A32: VRINTN. F32 Dd, Dm A64: FRINTN Vd.2S, Vn.2S |
RoundToNearestScalar(Vector64<Double>) |
float64x1_t vrndn_f64 (float64x1_t a) A32: VRINTN. F64 Dd, Dm A64: FRINTN Dd, Dn |
RoundToNearestScalar(Vector64<Single>) |
float32_t vrndns_f32 (float32_t a) A32: VRINTN. F32 Sd, Sm A64: FRINTN Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
RoundToNegativeInfinity(Vector128<Single>) |
float32x4_t vrndmq_f32 (float32x4_t a) A32: VRINTM. F32 Qd, Qm A64: FRINTM Vd.4S, Vn.4S |
RoundToNegativeInfinity(Vector64<Single>) |
float32x2_t vrndm_f32 (float32x2_t a) A32: VRINTM. F32 Dd, Dm A64: FRINTM Vd.2S, Vn.2S |
RoundToNegativeInfinityScalar(Vector64<Double>) |
float64x1_t vrndm_f64 (float64x1_t a) A32: VRINTM. F64 Dd, Dm A64: FRINTM Dd, Dn |
RoundToNegativeInfinityScalar(Vector64<Single>) |
float32_t vrndms_f32 (float32_t a) A32: VRINTM. F32 Sd, Sm A64: FRINTM Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
RoundToPositiveInfinity(Vector128<Single>) |
float32x4_t vrndpq_f32 (float32x4_t a) A32: VRINTP. F32 Qd, Qm A64: FRINTP Vd.4S, Vn.4S |
RoundToPositiveInfinity(Vector64<Single>) |
float32x2_t vrndp_f32 (float32x2_t a) A32: VRINTP. F32 Dd, Dm A64: FRINTP Vd.2S, Vn.2S |
RoundToPositiveInfinityScalar(Vector64<Double>) |
float64x1_t vrndp_f64 (float64x1_t a) A32: VRINTP. F64 Dd, Dm A64: FRINTP Dd, Dn |
RoundToPositiveInfinityScalar(Vector64<Single>) |
float32_t vrndps_f32 (float32_t a) A32: VRINTP. F32 Sd, Sm A64: FRINTP Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
RoundToZero(Vector128<Single>) |
float32x4_t vrndq_f32 (float32x4_t a) A32: VRINTZ. F32 Qd, Qm A64: FRINTZ Vd.4S, Vn.4S |
RoundToZero(Vector64<Single>) |
float32x2_t vrnd_f32 (float32x2_t a) A32: VRINTZ. F32 Dd, Dm A64: FRINTZ Vd.2S, Vn.2S |
RoundToZeroScalar(Vector64<Double>) |
float64x1_t vrnd_f64 (float64x1_t a) A32: VRINTZ. F64 Dd, Dm A64: FRINTZ Dd, Dn |
RoundToZeroScalar(Vector64<Single>) |
float32_t vrnds_f32 (float32_t a) A32: VRINTZ. F32 Sd, Sm A64: FRINTZ Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
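Usage sketch comparing the vector rounding modes on a few values; the expected results in the comments follow the semantics of the underlying FRINT* instructions.
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class RoundingExample
{
    static void Demo()
    {
        if (!AdvSimd.IsSupported) return;
        Vector128<float> v = Vector128.Create(-2.5f, -0.5f, 0.5f, 2.5f);
        Vector128<float> away    = AdvSimd.RoundAwayFromZero(v);       // -3, -1,  1,  3
        Vector128<float> nearest = AdvSimd.RoundToNearest(v);          // -2,  0,  0,  2 (ties to even)
        Vector128<float> down    = AdvSimd.RoundToNegativeInfinity(v); // -3, -1,  0,  2
        Vector128<float> up      = AdvSimd.RoundToPositiveInfinity(v); // -2,  0,  1,  3
        Vector128<float> trunc   = AdvSimd.RoundToZero(v);             // -2,  0,  0,  2
    }
}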
ShiftArithmetic(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vshlq_s16 (int16x8_t a, int16x8_t b) A32: VSHL. S16 Qd, Qn, Qm A64: SSHL Vd.8H, Vn.8H, Vm.8H |
ShiftArithmetic(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vshlq_s32 (int32x4_t a, int32x4_t b) A32: VSHL. S32 Qd, Qn, Qm A64: SSHL Vd.4S, Vn.4S, Vm.4S |
ShiftArithmetic(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vshlq_s64 (int64x2_t a, int64x2_t b) A32: VSHL. S64 Qd, Qn, Qm A64: SSHL Vd.2D, Vn.2D, Vm.2D |
ShiftArithmetic(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vshlq_s8 (int8x16_t a, int8x16_t b) A32: VSHL. S8 Qd, Qn, Qm A64: SSHL Vd.16B, Vn.16B, Vm.16B |
ShiftArithmetic(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vshl_s16 (int16x4_t a, int16x4_t b) A32: VSHL. S16 Dd, Dn, Dm A64: SSHL Vd.4H, Vn.4H, Vm.4H |
ShiftArithmetic(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vshl_s32 (int32x2_t a, int32x2_t b) A32: VSHL. S32 Dd, Dn, Dm A64: SSHL Vd.2S, Vn.2S, Vm.2S |
ShiftArithmetic(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vshl_s8 (int8x8_t a, int8x8_t b) A32: VSHL. S8 Dd, Dn, Dm A64: SSHL Vd.8B, Vn.8B, Vm.8B |
ShiftArithmeticRounded(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vrshlq_s16 (int16x8_t a, int16x8_t b) A32: VRSHL. S16 Qd, Qn, Qm A64: SRSHL Vd.8H, Vn.8H, Vm.8H |
ShiftArithmeticRounded(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vrshlq_s32 (int32x4_t a, int32x4_t b) A32: VRSHL. S32 Qd, Qn, Qm A64: SRSHL Vd.4S, Vn.4S, Vm.4S |
ShiftArithmeticRounded(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vrshlq_s64 (int64x2_t a, int64x2_t b) A32: VRSHL. S64 Qd, Qn, Qm A64: SRSHL Vd.2D, Vn.2D, Vm.2D |
ShiftArithmeticRounded(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vrshlq_s8 (int8x16_t a, int8x16_t b) A32: VRSHL. S8 Qd, Qn, Qm A64: SRSHL Vd.16B, Vn.16B, Vm.16B |
ShiftArithmeticRounded(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vrshl_s16 (int16x4_t a, int16x4_t b) A32: VRSHL. S16 Dd, Dn, Dm A64: SRSHL Vd.4H, Vn.4H, Vm.4H |
ShiftArithmeticRounded(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vrshl_s32 (int32x2_t a, int32x2_t b) A32: VRSHL. S32 Dd, Dn, Dm A64: SRSHL Vd.2S, Vn.2S, Vm.2S |
ShiftArithmeticRounded(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vrshl_s8 (int8x8_t a, int8x8_t b) A32: VRSHL. S8 Dd, Dn, Dm A64: SRSHL Vd.8B, Vn.8B, Vm.8B |
ShiftArithmeticRoundedSaturate(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqrshlq_s16 (int16x8_t a, int16x8_t b) A32: VQRSHL. S16 Qd, Qn, Qm A64: SQRSHL Vd.8H, Vn.8H, Vm.8H |
ShiftArithmeticRoundedSaturate(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqrshlq_s32 (int32x4_t a, int32x4_t b) A32: VQRSHL. S32 Qd, Qn, Qm A64: SQRSHL Vd.4S, Vn.4S, Vm.4S |
ShiftArithmeticRoundedSaturate(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vqrshlq_s64 (int64x2_t a, int64x2_t b) A32: VQRSHL. S64 Qd, Qn, Qm A64: SQRSHL Vd.2D, Vn.2D, Vm.2D |
ShiftArithmeticRoundedSaturate(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vqrshlq_s8 (int8x16_t a, int8x16_t b) A32: VQRSHL. S8 Qd, Qn, Qm A64: SQRSHL Vd.16B, Vn.16B, Vm.16B |
ShiftArithmeticRoundedSaturate(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqrshl_s16 (int16x4_t a, int16x4_t b) A32: VQRSHL. S16 Dd, Dn, Dm A64: SQRSHL Vd.4H, Vn.4H, Vm.4H |
ShiftArithmeticRoundedSaturate(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqrshl_s32 (int32x2_t a, int32x2_t b) A32: VQRSHL. S32 Dd, Dn, Dm A64: SQRSHL Vd.2S, Vn.2S, Vm.2S |
ShiftArithmeticRoundedSaturate(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vqrshl_s8 (int8x8_t a, int8x8_t b) A32: VQRSHL. S8 Dd, Dn, Dm A64: SQRSHL Vd.8B, Vn.8B, Vm.8B |
ShiftArithmeticRoundedSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vqrshl_s64 (int64x1_t a, int64x1_t b) A32: VQRSHL. S64 Dd, Dn, Dm A64: SQRSHL Dd, Dn, Dm |
ShiftArithmeticRoundedScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vrshl_s64 (int64x1_t a, int64x1_t b) A32: VRSHL. S64 Dd, Dn, Dm A64: SRSHL Dd, Dn, Dm |
ShiftArithmeticSaturate(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqshlq_s16 (int16x8_t a, int16x8_t b) A32: VQSHL. S16 Qd, Qn, Qm A64: SQSHL Vd.8H, Vn.8H, Vm.8H |
ShiftArithmeticSaturate(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqshlq_s32 (int32x4_t a, int32x4_t b) A32: VQSHL. S32 Qd, Qn, Qm A64: SQSHL Vd.4S, Vn.4S, Vm.4S |
ShiftArithmeticSaturate(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vqshlq_s64 (int64x2_t a, int64x2_t b) A32: VQSHL. S64 Qd, Qn, Qm A64: SQSHL Vd.2D, Vn.2D, Vm.2D |
ShiftArithmeticSaturate(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vqshlq_s8 (int8x16_t a, int8x16_t b) A32: VQSHL. S8 Qd, Qn, Qm A64: SQSHL Vd.16B, Vn.16B, Vm.16B |
ShiftArithmeticSaturate(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqshl_s16 (int16x4_t a, int16x4_t b) A32: VQSHL. S16 Dd, Dn, Dm A64: SQSHL Vd.4H, Vn.4H, Vm.4H |
ShiftArithmeticSaturate(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqshl_s32 (int32x2_t a, int32x2_t b) A32: VQSHL. S32 Dd, Dn, Dm A64: SQSHL Vd.2S, Vn.2S, Vm.2S |
ShiftArithmeticSaturate(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vqshl_s8 (int8x8_t a, int8x8_t b) A32: VQSHL. S8 Dd, Dn, Dm A64: SQSHL Vd.8B, Vn.8B, Vm.8B |
ShiftArithmeticSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vqshl_s64 (int64x1_t a, int64x1_t b) A32: VQSHL. S64 Dd, Dn, Dm A64: SQSHL Dd, Dn, Dm |
ShiftArithmeticScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vshl_s64 (int64x1_t a, int64x1_t b) A32: VSHL. S64 Dd, Dn, Dm A64: SSHL Dd, Dn, Dm |
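Usage sketch: the register-operand shift family takes a per-lane signed shift count, where a positive count shifts left and a negative count shifts right (arithmetically for this family). Values are illustrative.
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class VariableShiftExample
{
    static void Demo()
    {
        if (!AdvSimd.IsSupported) return;
        Vector128<int> values = Vector128.Create(-16, -16, 64, 64);
        Vector128<int> counts = Vector128.Create(2, -2, 1, -3);
        // SSHL: per-lane shift by a signed count.
        Vector128<int> shifted = AdvSimd.ShiftArithmetic(values, counts); // -64, -4, 128, 8
    }
}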
ShiftLeftAndInsert(Vector128<Byte>, Vector128<Byte>, Byte) |
uint8x16_t vsliq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n)) A32: VSLI.8 Qd, Qm, #n A64: SLI Vd.16B, Vn.16B, #n |
ShiftLeftAndInsert(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vsliq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n)) A32: VSLI.16 Qd, Qm, #n A64: SLI Vd.8H, Vn.8H, #n |
ShiftLeftAndInsert(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vsliq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n)) A32: VSLI.32 Qd, Qm, #n A64: SLI Vd.4S, Vn.4S, #n |
ShiftLeftAndInsert(Vector128<Int64>, Vector128<Int64>, Byte) |
int64x2_t vsliq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n)) A32: VSLI.64 Qd, Qm, #n A64: SLI Vd.2D, Vn.2D, #n |
ShiftLeftAndInsert(Vector128<SByte>, Vector128<SByte>, Byte) |
int8x16_t vsliq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n)) A32: VSLI.8 Qd, Qm, #n A64: SLI Vd.16B, Vn.16B, #n |
ShiftLeftAndInsert(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vsliq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n)) A32: VSLI.16 Qd, Qm, #n A64: SLI Vd.8H, Vn.8H, #n |
ShiftLeftAndInsert(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vsliq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n)) A32: VSLI.32 Qd, Qm, #n A64: SLI Vd.4S, Vn.4S, #n |
ShiftLeftAndInsert(Vector128<UInt64>, Vector128<UInt64>, Byte) |
uint64x2_t vsliq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n)) A32: VSLI.64 Qd, Qm, #n A64: SLI Vd.2D, Vn.2D, #n |
ShiftLeftAndInsert(Vector64<Byte>, Vector64<Byte>, Byte) |
uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n)) A32: VSLI.8 Dd, Dm, #n A64: SLI Vd.8B, Vn.8B, #n |
ShiftLeftAndInsert(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n)) A32: VSLI.16 Dd, Dm, #n A64: SLI Vd.4H, Vn.4H, #n |
ShiftLeftAndInsert(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n)) A32: VSLI.32 Dd, Dm, #n A64: SLI Vd.2S, Vn.2S, #n |
ShiftLeftAndInsert(Vector64<SByte>, Vector64<SByte>, Byte) |
int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n)) A32: VSLI.8 Dd, Dm, #n A64: SLI Vd.8B, Vn.8B, #n |
ShiftLeftAndInsert(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vsli_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n)) A32: VSLI.16 Dd, Dm, #n A64: SLI Vd.4H, Vn.4H, #n |
ShiftLeftAndInsert(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vsli_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n)) A32: VSLI.32 Dd, Dm, #n A64: SLI Vd.2S, Vn.2S, #n |
ShiftLeftAndInsertScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
int64_t vslid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) A32: VSLI.64 Dd, Dm, #n A64: SLI Dd, Dn, #n |
ShiftLeftAndInsertScalar(Vector64<UInt64>, Vector64<UInt64>, Byte) |
uint64_t vslid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) A32: VSLI.64 Dd, Dm, #n A64: SLI Dd, Dn, #n |
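Usage sketch: shift-left-and-insert (SLI) shifts the second operand left and inserts it into the first while preserving the first operand's low bits, which makes it handy for packing bit fields. The nibble-packing helper below is an invented example, not an API from this reference.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class SliExample
{
    // Packs two vectors of 4-bit values (0..15) into single bytes:
    // result = (high << 4) | (low & 0xF) per lane.
    static Vector64<byte> PackNibbles(Vector64<byte> low, Vector64<byte> high)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        return AdvSimd.ShiftLeftAndInsert(low, high, 4);
    }
}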
ShiftLeftLogical(Vector128<Byte>, Byte) |
uint8x16_t vshlq_n_u8 (uint8x16_t a, const int n) A32: VSHL. I8 Qd, Qm, #n A64: SHL Vd.16B, Vn.16B, #n |
ShiftLeftLogical(Vector128<Int16>, Byte) |
int16x8_t vshlq_n_s16 (int16x8_t a, const int n) A32: VSHL. I16 Qd, Qm, #n A64: SHL Vd.8H, Vn.8H, #n |
ShiftLeftLogical(Vector128<Int64>, Byte) |
int64x2_t vshlq_n_s64 (int64x2_t a, const int n) A32: VSHL. I64 Qd, Qm, #n A64: SHL Vd.2D, Vn.2D, #n |
ShiftLeftLogical(Vector128<SByte>, Byte) |
int8x16_t vshlq_n_s8 (int8x16_t a, const int n) A32: VSHL. I8 Qd, Qm, #n A64: SHL Vd.16B, Vn.16B, #n |
ShiftLeftLogical(Vector128<UInt16>, Byte) |
uint16x8_t vshlq_n_u16 (uint16x8_t a, const int n) A32: VSHL. I16 Qd, Qm, #n A64: SHL Vd.8H, Vn.8H, #n |
ShiftLeftLogical(Vector128<UInt32>, Byte) |
uint32x4_t vshlq_n_u32 (uint32x4_t a, const int n) A32: VSHL. I32 Qd, Qm, #n A64: SHL Vd.4S, Vn.4S, #n |
ShiftLeftLogical(Vector128<UInt64>, Byte) |
uint64x2_t vshlq_n_u64 (uint64x2_t a, const int n) A32: VSHL. I64 Qd, Qm, #n A64: SHL Vd.2D, Vn.2D, #n |
ShiftLeftLogical(Vector64<Byte>, Byte) |
uint8x8_t vshl_n_u8 (uint8x8_t a, const int n) A32: VSHL. I8 Dd, Dm, #n A64: SHL Vd.8B, Vn.8B, #n |
ShiftLeftLogical(Vector64<Int16>, Byte) |
int16x4_t vshl_n_s16 (int16x4_t a, const int n) A32: VSHL. I16 Dd, Dm, #n A64: SHL Vd.4H, Vn.4H, #n |
ShiftLeftLogical(Vector64<Int32>, Byte) |
int32x2_t vshl_n_s32 (int32x2_t a, const int n) A32: VSHL. I32 Dd, Dm, #n A64: SHL Vd.2S, Vn.2S, #n |
ShiftLeftLogical(Vector64<SByte>, Byte) |
int8x8_t vshl_n_s8 (int8x8_t a, const int n) A32: VSHL. I8 Dd, Dm, #n A64: SHL Vd.8B, Vn.8B, #n |
ShiftLeftLogical(Vector64<UInt16>, Byte) |
uint16x4_t vshl_n_u16 (uint16x4_t a, const int n) A32: VSHL. I16 Dd, Dm, #n A64: SHL Vd.4H, Vn.4H, #n |
ShiftLeftLogical(Vector64<UInt32>, Byte) |
uint32x2_t vshl_n_u32 (uint32x2_t a, const int n) A32: VSHL. I32 Dd, Dm, #n A64: SHL Vd.2S, Vn.2S, #n |
ShiftLeftLogicalSaturate(Vector128<Byte>, Byte) |
uint8x16_t vqshlq_n_u8 (uint8x16_t a, const int n) A32: VQSHL. U8 Qd, Qm, #n A64: UQSHL Vd.16B, Vn.16B, #n |
ShiftLeftLogicalSaturate(Vector128<Int16>, Byte) |
int16x8_t vqshlq_n_s16 (int16x8_t a, const int n) A32: VQSHL. S16 Qd, Qm, #n A64: SQSHL Vd.8H, Vn.8H, #n |
ShiftLeftLogicalSaturate(Vector128<Int32>, Byte) |
int32x4_t vqshlq_n_s32 (int32x4_t a, const int n) A32: VQSHL. S32 Qd, Qm, #n A64: SQSHL Vd.4S, Vn.4S, #n |
ShiftLeftLogicalSaturate(Vector128<Int64>, Byte) |
int64x2_t vqshlq_n_s64 (int64x2_t a, const int n) A32: VQSHL. S64 Qd, Qm, #n A64: SQSHL Vd.2D, Vn.2D, #n |
ShiftLeftLogicalSaturate(Vector128<SByte>, Byte) |
int8x16_t vqshlq_n_s8 (int8x16_t a, const int n) A32: VQSHL. S8 Qd, Qm, #n A64: SQSHL Vd.16B, Vn.16B, #n |
ShiftLeftLogicalSaturate(Vector128<UInt16>, Byte) |
uint16x8_t vqshlq_n_u16 (uint16x8_t a, const int n) A32: VQSHL. U16 Qd, Qm, #n A64: UQSHL Vd.8H, Vn.8H, #n |
ShiftLeftLogicalSaturate(Vector128<UInt32>, Byte) |
uint32x4_t vqshlq_n_u32 (uint32x4_t a, const int n) A32: VQSHL. U32 Qd, Qm, #n A64: UQSHL Vd.4S, Vn.4S, #n |
ShiftLeftLogicalSaturate(Vector128<UInt64>, Byte) |
uint64x2_t vqshlq_n_u64 (uint64x2_t a, const int n) A32: VQSHL. U64 Qd, Qm, #n A64: UQSHL Vd.2D, Vn.2D, #n |
ShiftLeftLogicalSaturate(Vector64<Byte>, Byte) |
uint8x8_t vqshl_n_u8 (uint8x8_t a, const int n) A32: VQSHL. U8 Dd, Dm, #n A64: UQSHL Vd.8B, Vn.8B, #n |
ShiftLeftLogicalSaturate(Vector64<Int16>, Byte) |
int16x4_t vqshl_n_s16 (int16x4_t a, const int n) A32: VQSHL. S16 Dd, Dm, #n A64: SQSHL Vd.4H, Vn.4H, #n |
ShiftLeftLogicalSaturate(Vector64<Int32>, Byte) |
int32x2_t vqshl_n_s32 (int32x2_t a, const int n) A32: VQSHL. S32 Dd, Dm, #n A64: SQSHL Vd.2S, Vn.2S, #n |
ShiftLeftLogicalSaturate(Vector64<SByte>, Byte) |
int8x8_t vqshl_n_s8 (int8x8_t a, const int n) A32: VQSHL. S8 Dd, Dm, #n A64: SQSHL Vd.8B, Vn.8B, #n |
ShiftLeftLogicalSaturate(Vector64<UInt16>, Byte) |
uint16x4_t vqshl_n_u16 (uint16x4_t a, const int n) A32: VQSHL. U16 Dd, Dm, #n A64: UQSHL Vd.4H, Vn.4H, #n |
ShiftLeftLogicalSaturate(Vector64<UInt32>, Byte) |
uint32x2_t vqshl_n_u32 (uint32x2_t a, const int n) A32: VQSHL. U32 Dd, Dm, #n A64: UQSHL Vd.2S, Vn.2S, #n |
ShiftLeftLogicalSaturateScalar(Vector64<Int64>, Byte) |
int64x1_t vqshl_n_s64 (int64x1_t a, const int n) A32: VQSHL. S64 Dd, Dm, #n A64: SQSHL Dd, Dn, #n |
ShiftLeftLogicalSaturateScalar(Vector64<UInt64>, Byte) |
uint64x1_t vqshl_n_u64 (uint64x1_t a, const int n) A32: VQSHL. U64 Dd, Dm, #n A64: UQSHL Dd, Dn, #n |
ShiftLeftLogicalSaturateUnsigned(Vector128<Int16>, Byte) |
uint16x8_t vqshluq_n_s16 (int16x8_t a, const int n) A32: VQSHLU. S16 Qd, Qm, #n A64: SQSHLU Vd.8H, Vn.8H, #n |
ShiftLeftLogicalSaturateUnsigned(Vector128<Int32>, Byte) |
uint32x4_t vqshluq_n_s32 (int32x4_t a, const int n) A32: VQSHLU. S32 Qd, Qm, #n A64: SQSHLU Vd.4S, Vn.4S, #n |
ShiftLeftLogicalSaturateUnsigned(Vector128<Int64>, Byte) |
uint64x2_t vqshluq_n_s64 (int64x2_t a, const int n) A32: VQSHLU. S64 Qd, Qm, #n A64: SQSHLU Vd.2D, Vn.2D, #n |
ShiftLeftLogicalSaturateUnsigned(Vector128<SByte>, Byte) |
uint8x16_t vqshluq_n_s8 (int8x16_t a, const int n) A32: VQSHLU. S8 Qd, Qm, #n A64: SQSHLU Vd.16B, Vn.16B, #n |
ShiftLeftLogicalSaturateUnsigned(Vector64<Int16>, Byte) |
uint16x4_t vqshlu_n_s16 (int16x4_t a, const int n) A32: VQSHLU. S16 Dd, Dm, #n A64: SQSHLU Vd.4H, Vn.4H, #n |
ShiftLeftLogicalSaturateUnsigned(Vector64<Int32>, Byte) |
uint32x2_t vqshlu_n_s32 (int32x2_t a, const int n) A32: VQSHLU. S32 Dd, Dm, #n A64: SQSHLU Vd.2S, Vn.2S, #n |
ShiftLeftLogicalSaturateUnsigned(Vector64<SByte>, Byte) |
uint8x8_t vqshlu_n_s8 (int8x8_t a, const int n) A32: VQSHLU. S8 Dd, Dm, #n A64: SQSHLU Vd.8B, Vn.8B, #n |
ShiftLeftLogicalSaturateUnsignedScalar(Vector64<Int64>, Byte) |
uint64x1_t vqshlu_n_s64 (int64x1_t a, const int n) A32: VQSHLU. S64 Dd, Dm, #n A64: SQSHLU Dd, Dn, #n |
ShiftLeftLogicalScalar(Vector64<Int64>, Byte) |
int64x1_t vshl_n_s64 (int64x1_t a, const int n) A32: VSHL. I64 Dd, Dm, #n A64: SHL Dd, Dn, #n |
ShiftLeftLogicalScalar(Vector64<UInt64>, Byte) |
uint64x1_t vshl_n_u64 (uint64x1_t a, const int n) A32: VSHL. I64 Dd, Dm, #n A64: SHL Dd, Dn, #n |
ShiftLeftLogicalWideningLower(Vector64<Byte>, Byte) |
uint16x8_t vshll_n_u8 (uint8x8_t a, const int n) A32: VSHLL. U8 Qd, Dm, #n A64: USHLL Vd.8H, Vn.8B, #n |
ShiftLeftLogicalWideningLower(Vector64<Int16>, Byte) |
int32x4_t vshll_n_s16 (int16x4_t a, const int n) A32: VSHLL. S16 Qd, Dm, #n A64: SSHLL Vd.4S, Vn.4H, #n |
ShiftLeftLogicalWideningLower(Vector64<Int32>, Byte) |
int64x2_t vshll_n_s32 (int32x2_t a, const int n) A32: VSHLL. S32 Qd, Dm, #n A64: SSHLL Vd.2D, Vn.2S, #n |
ShiftLeftLogicalWideningLower(Vector64<SByte>, Byte) |
int16x8_t vshll_n_s8 (int8x8_t a, const int n) A32: VSHLL. S8 Qd, Dm, #n A64: SSHLL Vd.8H, Vn.8B, #n |
ShiftLeftLogicalWideningLower(Vector64<UInt16>, Byte) |
uint32x4_t vshll_n_u16 (uint16x4_t a, const int n) A32: VSHLL. U16 Qd, Dm, #n A64: USHLL Vd.4S, Vn.4H, #n |
ShiftLeftLogicalWideningLower(Vector64<UInt32>, Byte) |
uint64x2_t vshll_n_u32 (uint32x2_t a, const int n) A32: VSHLL. U32 Qd, Dm, #n A64: USHLL Vd.2D, Vn.2S, #n |
ShiftLeftLogicalWideningUpper(Vector128<Byte>, Byte) |
uint16x8_t vshll_high_n_u8 (uint8x16_t a, const int n) A32: VSHLL. U8 Qd, Dm+1, #n A64: USHLL2 Vd.8H, Vn.16B, #n |
ShiftLeftLogicalWideningUpper(Vector128<Int16>, Byte) |
int32x4_t vshll_high_n_s16 (int16x8_t a, const int n) A32: VSHLL. S16 Qd, Dm+1, #n A64: SSHLL2 Vd.4S, Vn.8H, #n |
ShiftLeftLogicalWideningUpper(Vector128<Int32>, Byte) |
int64x2_t vshll_high_n_s32 (int32x4_t a, const int n) A32: VSHLL. S32 Qd, Dm+1, #n A64: SSHLL2 Vd.2D, Vn.4S, #n |
ShiftLeftLogicalWideningUpper(Vector128<SByte>, Byte) |
int16x8_t vshll_high_n_s8 (int8x16_t a, const int n) A32: VSHLL. S8 Qd, Dm+1, #n A64: SSHLL2 Vd.8H, Vn.16B, #n |
ShiftLeftLogicalWideningUpper(Vector128<UInt16>, Byte) |
uint32x4_t vshll_high_n_u16 (uint16x8_t a, const int n) A32: VSHLL. U16 Qd, Dm+1, #n A64: USHLL2 Vd.4S, Vn.8H, #n |
ShiftLeftLogicalWideningUpper(Vector128<UInt32>, Byte) |
uint64x2_t vshll_high_n_u32 (uint32x4_t a, const int n) A32: VSHLL. U32 Qd, Dm+1, #n A64: USHLL2 Vd.2D, Vn.4S, #n |
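Usage sketch: the widening left shifts convert narrow lanes to wider lanes and scale them in the same instruction. The shift count of 4 is an arbitrary example value within the valid immediate range for byte sources.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class WideningShiftExample
{
    static void Widen(Vector128<byte> samples, out Vector128<ushort> lower, out Vector128<ushort> upper)
    {
        if (!AdvSimd.IsSupported) throw new PlatformNotSupportedException();
        lower = AdvSimd.ShiftLeftLogicalWideningLower(samples.GetLower(), 4); // USHLL:  lanes 0-7
        upper = AdvSimd.ShiftLeftLogicalWideningUpper(samples, 4);            // USHLL2: lanes 8-15
    }
}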
ShiftLogical(Vector128<Byte>, Vector128<SByte>) |
uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b) A32: VSHL. U8 Qd, Qn, Qm A64: USHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogical(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b) A32: VSHL. U16 Qd, Qn, Qm A64: USHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogical(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b) A32: VSHL. U32 Qd, Qn, Qm A64: USHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogical(Vector128<Int64>, Vector128<Int64>) |
uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b) A32: VSHL. U64 Qd, Qn, Qm A64: USHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogical(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b) A32: VSHL. U8 Qd, Qn, Qm A64: USHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogical(Vector128<UInt16>, Vector128<Int16>) |
uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b) A32: VSHL. U16 Qd, Qn, Qm A64: USHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogical(Vector128<UInt32>, Vector128<Int32>) |
uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b) A32: VSHL. U32 Qd, Qn, Qm A64: USHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogical(Vector128<UInt64>, Vector128<Int64>) |
uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b) A32: VSHL. U64 Qd, Qn, Qm A64: USHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogical(Vector64<Byte>, Vector64<SByte>) |
uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b) A32: VSHL. U8 Dd, Dn, Dm A64: USHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogical(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b) A32: VSHL. U16 Dd, Dn, Dm A64: USHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogical(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b) A32: VSHL. U32 Dd, Dn, Dm A64: USHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogical(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b) A32: VSHL. U8 Dd, Dn, Dm A64: USHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogical(Vector64<UInt16>, Vector64<Int16>) |
uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b) A32: VSHL. U16 Dd, Dn, Dm A64: USHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogical(Vector64<UInt32>, Vector64<Int32>) |
uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b) A32: VSHL. U32 Dd, Dn, Dm A64: USHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalRounded(Vector128<Byte>, Vector128<SByte>) |
uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b) A32: VRSHL. U8 Qd, Qn, Qm A64: URSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalRounded(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b) A32: VRSHL. U16 Qd, Qn, Qm A64: URSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalRounded(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b) A32: VRSHL. U32 Qd, Qn, Qm A64: URSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalRounded(Vector128<Int64>, Vector128<Int64>) |
uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b) A32: VRSHL. U64 Qd, Qn, Qm A64: URSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalRounded(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b) A32: VRSHL. U8 Qd, Qn, Qm A64: URSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalRounded(Vector128<UInt16>, Vector128<Int16>) |
uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b) A32: VRSHL. U16 Qd, Qn, Qm A64: URSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalRounded(Vector128<UInt32>, Vector128<Int32>) |
uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b) A32: VRSHL. U32 Qd, Qn, Qm A64: URSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalRounded(Vector128<UInt64>, Vector128<Int64>) |
uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b) A32: VRSHL. U64 Qd, Qn, Qm A64: URSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalRounded(Vector64<Byte>, Vector64<SByte>) |
uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b) A32: VRSHL. U8 Dd, Dn, Dm A64: URSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalRounded(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b) A32: VRSHL. U16 Dd, Dn, Dm A64: URSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalRounded(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b) A32: VRSHL. U32 Dd, Dn, Dm A64: URSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalRounded(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b) A32: VRSHL. U8 Dd, Dn, Dm A64: URSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalRounded(Vector64<UInt16>, Vector64<Int16>) |
uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b) A32: VRSHL. U16 Dd, Dn, Dm A64: URSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalRounded(Vector64<UInt32>, Vector64<Int32>) |
uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b) A32: VRSHL. U32 Dd, Dn, Dm A64: URSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalRoundedSaturate(Vector128<Byte>, Vector128<SByte>) |
uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b) A32: VQRSHL. U8 Qd, Qn, Qm A64: UQRSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalRoundedSaturate(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b) A32: VQRSHL. U16 Qd, Qn, Qm A64: UQRSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalRoundedSaturate(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b) A32: VQRSHL. U32 Qd, Qn, Qm A64: UQRSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalRoundedSaturate(Vector128<Int64>, Vector128<Int64>) |
uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b) A32: VQRSHL. U64 Qd, Qn, Qm A64: UQRSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalRoundedSaturate(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b) A32: VQRSHL. U8 Qd, Qn, Qm A64: UQRSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalRoundedSaturate(Vector128<UInt16>, Vector128<Int16>) |
uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b) A32: VQRSHL. U16 Qd, Qn, Qm A64: UQRSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalRoundedSaturate(Vector128<UInt32>, Vector128<Int32>) |
uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b) A32: VQRSHL. U32 Qd, Qn, Qm A64: UQRSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalRoundedSaturate(Vector128<UInt64>, Vector128<Int64>) |
uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b) A32: VQRSHL. U64 Qd, Qn, Qm A64: UQRSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalRoundedSaturate(Vector64<Byte>, Vector64<SByte>) |
uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b) A32: VQRSHL. U8 Dd, Dn, Dm A64: UQRSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalRoundedSaturate(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b) A32: VQRSHL. U16 Dd, Dn, Dm A64: UQRSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalRoundedSaturate(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b) A32: VQRSHL. U32 Dd, Dn, Dm A64: UQRSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalRoundedSaturate(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b) A32: VQRSHL. U8 Dd, Dn, Dm A64: UQRSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalRoundedSaturate(Vector64<UInt16>, Vector64<Int16>) |
uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b) A32: VQRSHL. U16 Dd, Dn, Dm A64: UQRSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalRoundedSaturate(Vector64<UInt32>, Vector64<Int32>) |
uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b) A32: VQRSHL. U32 Dd, Dn, Dm A64: UQRSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalRoundedSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b) A32: VQRSHL. U64 Dd, Dn, Dm A64: UQRSHL Dd, Dn, Dm |
ShiftLogicalRoundedSaturateScalar(Vector64<UInt64>, Vector64<Int64>) |
uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b) A32: VQRSHL. U64 Dd, Dn, Dm A64: UQRSHL Dd, Dn, Dm |
ShiftLogicalRoundedScalar(Vector64<Int64>, Vector64<Int64>) |
uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b) A32: VRSHL. U64 Dd, Dn, Dm A64: URSHL Dd, Dn, Dm |
ShiftLogicalRoundedScalar(Vector64<UInt64>, Vector64<Int64>) |
uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b) A32: VRSHL. U64 Dd, Dn, Dm A64: URSHL Dd, Dn, Dm |
ShiftLogicalSaturate(Vector128<Byte>, Vector128<SByte>) |
uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b) A32: VQSHL. U8 Qd, Qn, Qm A64: UQSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalSaturate(Vector128<Int16>, Vector128<Int16>) |
uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b) A32: VQSHL. U16 Qd, Qn, Qm A64: UQSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalSaturate(Vector128<Int32>, Vector128<Int32>) |
uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b) A32: VQSHL. U32 Qd, Qn, Qm A64: UQSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalSaturate(Vector128<Int64>, Vector128<Int64>) |
uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b) A32: VQSHL. U64 Qd, Qn, Qm A64: UQSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalSaturate(Vector128<SByte>, Vector128<SByte>) |
uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b) A32: VQSHL. U8 Qd, Qn, Qm A64: UQSHL Vd.16B, Vn.16B, Vm.16B |
ShiftLogicalSaturate(Vector128<UInt16>, Vector128<Int16>) |
uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b) A32: VQSHL. U16 Qd, Qn, Qm A64: UQSHL Vd.8H, Vn.8H, Vm.8H |
ShiftLogicalSaturate(Vector128<UInt32>, Vector128<Int32>) |
uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b) A32: VQSHL. U32 Qd, Qn, Qm A64: UQSHL Vd.4S, Vn.4S, Vm.4S |
ShiftLogicalSaturate(Vector128<UInt64>, Vector128<Int64>) |
uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b) A32: VQSHL. U64 Qd, Qn, Qm A64: UQSHL Vd.2D, Vn.2D, Vm.2D |
ShiftLogicalSaturate(Vector64<Byte>, Vector64<SByte>) |
uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b) A32: VQSHL. U8 Dd, Dn, Dm A64: UQSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalSaturate(Vector64<Int16>, Vector64<Int16>) |
uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b) A32: VQSHL. U16 Dd, Dn, Dm A64: UQSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalSaturate(Vector64<Int32>, Vector64<Int32>) |
uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b) A32: VQSHL. U32 Dd, Dn, Dm A64: UQSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalSaturate(Vector64<SByte>, Vector64<SByte>) |
uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b) A32: VQSHL. U8 Dd, Dn, Dm A64: UQSHL Vd.8B, Vn.8B, Vm.8B |
ShiftLogicalSaturate(Vector64<UInt16>, Vector64<Int16>) |
uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b) A32: VQSHL. U16 Dd, Dn, Dm A64: UQSHL Vd.4H, Vn.4H, Vm.4H |
ShiftLogicalSaturate(Vector64<UInt32>, Vector64<Int32>) |
uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b) A32: VQSHL. U32 Dd, Dn, Dm A64: UQSHL Vd.2S, Vn.2S, Vm.2S |
ShiftLogicalSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b) A32: VQSHL. U64 Dd, Dn, Dm A64: UQSHL Dd, Dn, Dm |
ShiftLogicalSaturateScalar(Vector64<UInt64>, Vector64<Int64>) |
uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b) A32: VQSHL. U64 Dd, Dn, Dm A64: UQSHL Dd, Dn, Dm |
ShiftLogicalScalar(Vector64<Int64>, Vector64<Int64>) |
uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b) A32: VSHL. U64 Dd, Dn, Dm A64: USHL Dd, Dn, Dm |
ShiftLogicalScalar(Vector64<UInt64>, Vector64<Int64>) |
uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b) A32: VSHL. U64 Dd, Dn, Dm A64: USHL Dd, Dn, Dm |
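Example (illustrative, not part of the original reference): the variable-count logical shift overloads above read the shift amount for each lane from the second vector, and a negative count in a lane shifts that lane to the right. A minimal C# sketch, guarded by IsSupported; all names below are only for illustration.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class ShiftByVectorDemo
{
    // Rounding, saturating per-lane shift (UQRSHL): each lane of 'counts' selects the
    // shift for the matching lane of 'values'; negative counts shift right with rounding.
    public static Vector64<byte> RoundedSaturatingShift(Vector64<byte> values, Vector64<sbyte> counts)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        return AdvSimd.ShiftLogicalRoundedSaturate(values, counts);
    }
}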
ShiftRightAndInsert(Vector128<Byte>, Vector128<Byte>, Byte) |
uint8x16_t vsriq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n)) A32: VSRI.8 Qd, Qm, #n A64: SRI Vd.16B, Vn.16B, #n |
ShiftRightAndInsert(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vsriq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n)) A32: VSRI.16 Qd, Qm, #n A64: SRI Vd.8H, Vn.8H, #n |
ShiftRightAndInsert(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vsriq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n)) A32: VSRI.32 Qd, Qm, #n A64: SRI Vd.4S, Vn.4S, #n |
ShiftRightAndInsert(Vector128<Int64>, Vector128<Int64>, Byte) |
int64x2_t vsriq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n)) A32: VSRI.64 Qd, Qm, #n A64: SRI Vd.2D, Vn.2D, #n |
ShiftRightAndInsert(Vector128<SByte>, Vector128<SByte>, Byte) |
int8x16_t vsriq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n)) A32: VSRI.8 Qd, Qm, #n A64: SRI Vd.16B, Vn.16B, #n |
ShiftRightAndInsert(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vsriq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n)) A32: VSRI.16 Qd, Qm, #n A64: SRI Vd.8H, Vn.8H, #n |
ShiftRightAndInsert(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vsriq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n)) A32: VSRI.32 Qd, Qm, #n A64: SRI Vd.4S, Vn.4S, #n |
ShiftRightAndInsert(Vector128<UInt64>, Vector128<UInt64>, Byte) |
uint64x2_t vsriq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n)) A32: VSRI.64 Qd, Qm, #n A64: SRI Vd.2D, Vn.2D, #n |
ShiftRightAndInsert(Vector64<Byte>, Vector64<Byte>, Byte) |
uint8x8_t vsri_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n)) A32: VSRI.8 Dd, Dm, #n A64: SRI Vd.8B, Vn.8B, #n |
ShiftRightAndInsert(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vsri_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n)) A32: VSRI.16 Dd, Dm, #n A64: SRI Vd.4H, Vn.4H, #n |
ShiftRightAndInsert(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vsri_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n)) A32: VSRI.32 Dd, Dm, #n A64: SRI Vd.2S, Vn.2S, #n |
ShiftRightAndInsert(Vector64<SByte>, Vector64<SByte>, Byte) |
int8x8_t vsri_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n)) A32: VSRI.8 Dd, Dm, #n A64: SRI Vd.8B, Vn.8B, #n |
ShiftRightAndInsert(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vsri_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n)) A32: VSRI.16 Dd, Dm, #n A64: SRI Vd.4H, Vn.4H, #n |
ShiftRightAndInsert(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vsri_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n)) A32: VSRI.32 Dd, Dm, #n A64: SRI Vd.2S, Vn.2S, #n |
ShiftRightAndInsertScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
int64_t vsrid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) A32: VSRI.64 Dd, Dm, #n A64: SRI Dd, Dn, #n |
ShiftRightAndInsertScalar(Vector64<UInt64>, Vector64<UInt64>, Byte) |
uint64_t vsrid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) A32: VSRI.64 Dd, Dm, #n A64: SRI Dd, Dn, #n |
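Example (illustrative, not part of the original reference): as I read the SRI/VSRI semantics, ShiftRightAndInsert shifts each element of the second vector right by n and inserts the result into the first vector, leaving the top n bits of the first operand unchanged. A hypothetical C# sketch that packs the high nibbles of two byte vectors; the helper name is illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class InsertDemo
{
    // Every byte lane keeps the top 4 bits of 'hi'; the top 4 bits of 'lo'
    // are shifted right by 4 and inserted into the low half of the lane.
    public static Vector64<byte> PackHighNibbles(Vector64<byte> hi, Vector64<byte> lo)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        return AdvSimd.ShiftRightAndInsert(hi, lo, 4);
    }
}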
ShiftRightArithmetic(Vector128<Int16>, Byte) |
int16x8_t vshrq_n_s16 (int16x8_t a, const int n) A32: VSHR. S16 Qd, Qm, #n A64: SSHR Vd.8H, Vn.8H, #n |
ShiftRightArithmetic(Vector128<Int32>, Byte) |
int32x4_t vshrq_n_s32 (int32x4_t a, const int n) A32: VSHR. S32 Qd, Qm, #n A64: SSHR Vd.4S, Vn.4S, #n |
ShiftRightArithmetic(Vector128<Int64>, Byte) |
int64x2_t vshrq_n_s64 (int64x2_t a, const int n) A32: VSHR. S64 Qd, Qm, #n A64: SSHR Vd.2D, Vn.2D, #n |
ShiftRightArithmetic(Vector128<SByte>, Byte) |
int8x16_t vshrq_n_s8 (int8x16_t a, const int n) A32: VSHR. S8 Qd, Qm, #n A64: SSHR Vd.16B, Vn.16B, #n |
ShiftRightArithmetic(Vector64<Int16>, Byte) |
int16x4_t vshr_n_s16 (int16x4_t a, const int n) A32: VSHR. S16 Dd, Dm, #n A64: SSHR Vd.4H, Vn.4H, #n |
ShiftRightArithmetic(Vector64<Int32>, Byte) |
int32x2_t vshr_n_s32 (int32x2_t a, const int n) A32: VSHR. S32 Dd, Dm, #n A64: SSHR Vd.2S, Vn.2S, #n |
ShiftRightArithmetic(Vector64<SByte>, Byte) |
int8x8_t vshr_n_s8 (int8x8_t a, const int n) A32: VSHR. S8 Dd, Dm, #n A64: SSHR Vd.8B, Vn.8B, #n |
ShiftRightArithmeticAdd(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vsraq_n_s16 (int16x8_t a, int16x8_t b, const int n) A32: VSRA. S16 Qd, Qm, #n A64: SSRA Vd.8H, Vn.8H, #n |
ShiftRightArithmeticAdd(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vsraq_n_s32 (int32x4_t a, int32x4_t b, const int n) A32: VSRA. S32 Qd, Qm, #n A64: SSRA Vd.4S, Vn.4S, #n |
ShiftRightArithmeticAdd(Vector128<Int64>, Vector128<Int64>, Byte) |
int64x2_t vsraq_n_s64 (int64x2_t a, int64x2_t b, const int n) A32: VSRA. S64 Qd, Qm, #n A64: SSRA Vd.2D, Vn.2D, #n |
ShiftRightArithmeticAdd(Vector128<SByte>, Vector128<SByte>, Byte) |
int8x16_t vsraq_n_s8 (int8x16_t a, int8x16_t b, const int n) A32: VSRA. S8 Qd, Qm, #n A64: SSRA Vd.16B, Vn.16B, #n |
ShiftRightArithmeticAdd(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vsra_n_s16 (int16x4_t a, int16x4_t b, const int n) A32: VSRA. S16 Dd, Dm, #n A64: SSRA Vd.4H, Vn.4H, #n |
ShiftRightArithmeticAdd(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vsra_n_s32 (int32x2_t a, int32x2_t b, const int n) A32: VSRA. S32 Dd, Dm, #n A64: SSRA Vd.2S, Vn.2S, #n |
ShiftRightArithmeticAdd(Vector64<SByte>, Vector64<SByte>, Byte) |
int8x8_t vsra_n_s8 (int8x8_t a, int8x8_t b, const int n) A32: VSRA. S8 Dd, Dm, #n A64: SSRA Vd.8B, Vn.8B, #n |
ShiftRightArithmeticAddScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
int64x1_t vsra_n_s64 (int64x1_t a, int64x1_t b, const int n) A32: VSRA. S64 Dd, Dm, #n A64: SSRA Dd, Dn, #n |
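Example (illustrative, not part of the original reference): ShiftRightArithmeticAdd folds a sign-propagating shift and the accumulation into one SSRA instruction, a common building block in fixed-point loops. A minimal C# sketch; the helper name is illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class ArithmeticShiftDemo
{
    // Adds (value >> 2), with sign extension, into the running accumulator: acc + (value >> 2).
    public static Vector128<short> AccumulateQuarter(Vector128<short> acc, Vector128<short> value)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        return AdvSimd.ShiftRightArithmeticAdd(acc, value, 2);
    }
}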
ShiftRightArithmeticNarrowingSaturateLower(Vector128<Int16>, Byte) |
int8x8_t vqshrn_n_s16 (int16x8_t a, const int n) A32: VQSHRN. S16 Dd, Qm, #n A64: SQSHRN Vd.8B, Vn.8H, #n |
ShiftRightArithmeticNarrowingSaturateLower(Vector128<Int32>, Byte) |
int16x4_t vqshrn_n_s32 (int32x4_t a, const int n) A32: VQSHRN. S32 Dd, Qm, #n A64: SQSHRN Vd.4H, Vn.4S, #n |
ShiftRightArithmeticNarrowingSaturateLower(Vector128<Int64>, Byte) |
int32x2_t vqshrn_n_s64 (int64x2_t a, const int n) A32: VQSHRN. S64 Dd, Qm, #n A64: SQSHRN Vd.2S, Vn.2D, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<Int16>, Byte) |
uint8x8_t vqshrun_n_s16 (int16x8_t a, const int n) A32: VQSHRUN. S16 Dd, Qm, #n A64: SQSHRUN Vd.8B, Vn.8H, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<Int32>, Byte) |
uint16x4_t vqshrun_n_s32 (int32x4_t a, const int n) A32: VQSHRUN. S32 Dd, Qm, #n A64: SQSHRUN Vd.4H, Vn.4S, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<Int64>, Byte) |
uint32x2_t vqshrun_n_s64 (int64x2_t a, const int n) A32: VQSHRUN. S64 Dd, Qm, #n A64: SQSHRUN Vd.2S, Vn.2D, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<Byte>, Vector128<Int16>, Byte) |
uint8x16_t vqshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n) A32: VQSHRUN. S16 Dd+1, Dn, #n A64: SQSHRUN2 Vd.16B, Vn.8H, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<UInt16>, Vector128<Int32>, Byte) |
uint16x8_t vqshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n) A32: VQSHRUN. S32 Dd+1, Dn, #n A64: SQSHRUN2 Vd.8H, Vn.4S, #n |
ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<UInt32>, Vector128<Int64>, Byte) |
uint32x4_t vqshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n) A32: VQSHRUN. S64 Dd+1, Dn, #n A64: SQSHRUN2 Vd.4S, Vn.2D, #n |
ShiftRightArithmeticNarrowingSaturateUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
int16x8_t vqshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) A32: VQSHRN. S32 Dd+1, Qm, #n A64: SQSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightArithmeticNarrowingSaturateUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
int32x4_t vqshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) A32: VQSHRN. S64 Dd+1, Qm, #n A64: SQSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightArithmeticNarrowingSaturateUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
int8x16_t vqshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) A32: VQSHRN. S16 Dd+1, Qm, #n A64: SQSHRN2 Vd.16B, Vn.8H, #n |
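Example (illustrative, not part of the original reference): the Lower/Upper pairs above are typically used together to scale two 128-bit Int16 vectors down and pack them into a single saturated SByte vector. A minimal C# sketch, assuming a 3-bit shift; names are illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class NarrowPackDemo
{
    // SQSHRN fills the low 8 lanes of the result, SQSHRN2 fills the high 8 lanes.
    public static Vector128<sbyte> ShiftNarrowPack(Vector128<short> lo, Vector128<short> hi)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        Vector64<sbyte> lower = AdvSimd.ShiftRightArithmeticNarrowingSaturateLower(lo, 3);
        return AdvSimd.ShiftRightArithmeticNarrowingSaturateUpper(lower, hi, 3);
    }
}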
ShiftRightArithmeticRounded(Vector128<Int16>, Byte) |
int16x8_t vrshrq_n_s16 (int16x8_t a, const int n) A32: VRSHR. S16 Qd, Qm, #n A64: SRSHR Vd.8H, Vn.8H, #n |
ShiftRightArithmeticRounded(Vector128<Int32>, Byte) |
int32x4_t vrshrq_n_s32 (int32x4_t a, const int n) A32: VRSHR. S32 Qd, Qm, #n A64: SRSHR Vd.4S, Vn.4S, #n |
ShiftRightArithmeticRounded(Vector128<Int64>, Byte) |
int64x2_t vrshrq_n_s64 (int64x2_t a, const int n) A32: VRSHR. S64 Qd, Qm, #n A64: SRSHR Vd.2D, Vn.2D, #n |
ShiftRightArithmeticRounded(Vector128<SByte>, Byte) |
int8x16_t vrshrq_n_s8 (int8x16_t a, const int n) A32: VRSHR. S8 Qd, Qm, #n A64: SRSHR Vd.16B, Vn.16B, #n |
ShiftRightArithmeticRounded(Vector64<Int16>, Byte) |
int16x4_t vrshr_n_s16 (int16x4_t a, const int n) A32: VRSHR. S16 Dd, Dm, #n A64: SRSHR Vd.4H, Vn.4H, #n |
ShiftRightArithmeticRounded(Vector64<Int32>, Byte) |
int32x2_t vrshr_n_s32 (int32x2_t a, const int n) A32: VRSHR. S32 Dd, Dm, #n A64: SRSHR Vd.2S, Vn.2S, #n |
ShiftRightArithmeticRounded(Vector64<SByte>, Byte) |
int8x8_t vrshr_n_s8 (int8x8_t a, const int n) A32: VRSHR. S8 Dd, Dm, #n A64: SRSHR Vd.8B, Vn.8B, #n |
ShiftRightArithmeticRoundedAdd(Vector128<Int16>, Vector128<Int16>, Byte) |
int16x8_t vrsraq_n_s16 (int16x8_t a, int16x8_t b, const int n) A32: VRSRA. S16 Qd, Qm, #n A64: SRSRA Vd.8H, Vn.8H, #n |
ShiftRightArithmeticRoundedAdd(Vector128<Int32>, Vector128<Int32>, Byte) |
int32x4_t vrsraq_n_s32 (int32x4_t a, int32x4_t b, const int n) A32: VRSRA. S32 Qd, Qm, #n A64: SRSRA Vd.4S, Vn.4S, #n |
ShiftRightArithmeticRoundedAdd(Vector128<Int64>, Vector128<Int64>, Byte) |
int64x2_t vrsraq_n_s64 (int64x2_t a, int64x2_t b, const int n) A32: VRSRA. S64 Qd, Qm, #n A64: SRSRA Vd.2D, Vn.2D, #n |
ShiftRightArithmeticRoundedAdd(Vector128<SByte>, Vector128<SByte>, Byte) |
int8x16_t vrsraq_n_s8 (int8x16_t a, int8x16_t b, const int n) A32: VRSRA. S8 Qd, Qm, #n A64: SRSRA Vd.16B, Vn.16B, #n |
ShiftRightArithmeticRoundedAdd(Vector64<Int16>, Vector64<Int16>, Byte) |
int16x4_t vrsra_n_s16 (int16x4_t a, int16x4_t b, const int n) A32: VRSRA. S16 Dd, Dm, #n A64: SRSRA Vd.4H, Vn.4H, #n |
ShiftRightArithmeticRoundedAdd(Vector64<Int32>, Vector64<Int32>, Byte) |
int32x2_t vrsra_n_s32 (int32x2_t a, int32x2_t b, const int n) A32: VRSRA. S32 Dd, Dm, #n A64: SRSRA Vd.2S, Vn.2S, #n |
ShiftRightArithmeticRoundedAdd(Vector64<SByte>, Vector64<SByte>, Byte) |
int8x8_t vrsra_n_s8 (int8x8_t a, int8x8_t b, const int n) A32: VRSRA. S8 Dd, Dm, #n A64: SRSRA Vd.8B, Vn.8B, #n |
ShiftRightArithmeticRoundedAddScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
int64x1_t vrsra_n_s64 (int64x1_t a, int64x1_t b, const int n) A32: VRSRA. S64 Dd, Dm, #n A64: SRSRA Dd, Dn, #n |
ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<Int16>, Byte) |
int8x8_t vqrshrn_n_s16 (int16x8_t a, const int n) A32: VQRSHRN. S16 Dd, Qm, #n A64: SQRSHRN Vd.8B, Vn.8H, #n |
ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<Int32>, Byte) |
int16x4_t vqrshrn_n_s32 (int32x4_t a, const int n) A32: VQRSHRN. S32 Dd, Qm, #n A64: SQRSHRN Vd.4H, Vn.4S, #n |
ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<Int64>, Byte) |
int32x2_t vqrshrn_n_s64 (int64x2_t a, const int n) A32: VQRSHRN. S64 Dd, Qm, #n A64: SQRSHRN Vd.2S, Vn.2D, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<Int16>, Byte) |
uint8x8_t vqrshrun_n_s16 (int16x8_t a, const int n) A32: VQRSHRUN. S16 Dd, Qm, #n A64: SQRSHRUN Vd.8B, Vn.8H, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<Int32>, Byte) |
uint16x4_t vqrshrun_n_s32 (int32x4_t a, const int n) A32: VQRSHRUN. S32 Dd, Qm, #n A64: SQRSHRUN Vd.4H, Vn.4S, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<Int64>, Byte) |
uint32x2_t vqrshrun_n_s64 (int64x2_t a, const int n) A32: VQRSHRUN. S64 Dd, Qm, #n A64: SQRSHRUN Vd.2S, Vn.2D, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<Byte>, Vector128<Int16>, Byte) |
uint8x16_t vqrshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n) A32: VQRSHRUN. S16 Dd+1, Dn, #n A64: SQRSHRUN2 Vd.16B, Vn.8H, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<UInt16>, Vector128<Int32>, Byte) |
uint16x8_t vqrshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n) A32: VQRSHRUN. S32 Dd+1, Dn, #n A64: SQRSHRUN2 Vd.8H, Vn.4S, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<UInt32>, Vector128<Int64>, Byte) |
uint32x4_t vqrshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n) A32: VQRSHRUN. S64 Dd+1, Dn, #n A64: SQRSHRUN2 Vd.4S, Vn.2D, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
int16x8_t vqrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) A32: VQRSHRN. S32 Dd+1, Dn, #n A64: SQRSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
int32x4_t vqrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) A32: VQRSHRN. S64 Dd+1, Dn, #n A64: SQRSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
int8x16_t vqrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) A32: VQRSHRN. S16 Dd+1, Dn, #n A64: SQRSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightArithmeticRoundedScalar(Vector64<Int64>, Byte) |
int64x1_t vrshr_n_s64 (int64x1_t a, const int n) A32: VRSHR. S64 Dd, Dm, #n A64: SRSHR Dd, Dn, #n |
ShiftRightArithmeticScalar(Vector64<Int64>, Byte) |
int64x1_t vshr_n_s64 (int64x1_t a, const int n) A32: VSHR. S64 Dd, Dm, #n A64: SSHR Dd, Dn, #n |
ShiftRightLogical(Vector128<Byte>, Byte) |
uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n) A32: VSHR. U8 Qd, Qm, #n A64: USHR Vd.16B, Vn.16B, #n |
ShiftRightLogical(Vector128<Int16>, Byte) |
uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n) A32: VSHR. U16 Qd, Qm, #n A64: USHR Vd.8H, Vn.8H, #n |
ShiftRightLogical(Vector128<Int32>, Byte) |
uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n) A32: VSHR. U32 Qd, Qm, #n A64: USHR Vd.4S, Vn.4S, #n |
ShiftRightLogical(Vector128<Int64>, Byte) |
uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n) A32: VSHR. U64 Qd, Qm, #n A64: USHR Vd.2D, Vn.2D, #n |
ShiftRightLogical(Vector128<SByte>, Byte) |
uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n) A32: VSHR. U8 Qd, Qm, #n A64: USHR Vd.16B, Vn.16B, #n |
ShiftRightLogical(Vector128<UInt16>, Byte) |
uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n) A32: VSHR. U16 Qd, Qm, #n A64: USHR Vd.8H, Vn.8H, #n |
ShiftRightLogical(Vector128<UInt32>, Byte) |
uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n) A32: VSHR. U32 Qd, Qm, #n A64: USHR Vd.4S, Vn.4S, #n |
ShiftRightLogical(Vector128<UInt64>, Byte) |
uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n) A32: VSHR. U64 Qd, Qm, #n A64: USHR Vd.2D, Vn.2D, #n |
ShiftRightLogical(Vector64<Byte>, Byte) |
uint8x8_t vshr_n_u8 (uint8x8_t a, const int n) A32: VSHR. U8 Dd, Dm, #n A64: USHR Vd.8B, Vn.8B, #n |
ShiftRightLogical(Vector64<Int16>, Byte) |
uint16x4_t vshr_n_u16 (uint16x4_t a, const int n) A32: VSHR. U16 Dd, Dm, #n A64: USHR Vd.4H, Vn.4H, #n |
ShiftRightLogical(Vector64<Int32>, Byte) |
uint32x2_t vshr_n_u32 (uint32x2_t a, const int n) A32: VSHR. U32 Dd, Dm, #n A64: USHR Vd.2S, Vn.2S, #n |
ShiftRightLogical(Vector64<SByte>, Byte) |
uint8x8_t vshr_n_u8 (uint8x8_t a, const int n) A32: VSHR. U8 Dd, Dm, #n A64: USHR Vd.8B, Vn.8B, #n |
ShiftRightLogical(Vector64<UInt16>, Byte) |
uint16x4_t vshr_n_u16 (uint16x4_t a, const int n) A32: VSHR. U16 Dd, Dm, #n A64: USHR Vd.4H, Vn.4H, #n |
ShiftRightLogical(Vector64<UInt32>, Byte) |
uint32x2_t vshr_n_u32 (uint32x2_t a, const int n) A32: VSHR. U32 Dd, Dm, #n A64: USHR Vd.2S, Vn.2S, #n |
ShiftRightLogicalAdd(Vector128<Byte>, Vector128<Byte>, Byte) |
uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) A32: VSRA. U8 Qd, Qm, #n A64: USRA Vd.16B, Vn.16B, #n |
ShiftRightLogicalAdd(Vector128<Int16>, Vector128<Int16>, Byte) |
uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) A32: VSRA. U16 Qd, Qm, #n A64: USRA Vd.8H, Vn.8H, #n |
ShiftRightLogicalAdd(Vector128<Int32>, Vector128<Int32>, Byte) |
uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) A32: VSRA. U32 Qd, Qm, #n A64: USRA Vd.4S, Vn.4S, #n |
ShiftRightLogicalAdd(Vector128<Int64>, Vector128<Int64>, Byte) |
uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) A32: VSRA. U64 Qd, Qm, #n A64: USRA Vd.2D, Vn.2D, #n |
ShiftRightLogicalAdd(Vector128<SByte>, Vector128<SByte>, Byte) |
uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) A32: VSRA. U8 Qd, Qm, #n A64: USRA Vd.16B, Vn.16B, #n |
ShiftRightLogicalAdd(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) A32: VSRA. U16 Qd, Qm, #n A64: USRA Vd.8H, Vn.8H, #n |
ShiftRightLogicalAdd(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) A32: VSRA. U32 Qd, Qm, #n A64: USRA Vd.4S, Vn.4S, #n |
ShiftRightLogicalAdd(Vector128<UInt64>, Vector128<UInt64>, Byte) |
uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) A32: VSRA. U64 Qd, Qm, #n A64: USRA Vd.2D, Vn.2D, #n |
ShiftRightLogicalAdd(Vector64<Byte>, Vector64<Byte>, Byte) |
uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) A32: VSRA. U8 Dd, Dm, #n A64: USRA Vd.8B, Vn.8B, #n |
ShiftRightLogicalAdd(Vector64<Int16>, Vector64<Int16>, Byte) |
uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) A32: VSRA. U16 Dd, Dm, #n A64: USRA Vd.4H, Vn.4H, #n |
ShiftRightLogicalAdd(Vector64<Int32>, Vector64<Int32>, Byte) |
uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) A32: VSRA. U32 Dd, Dm, #n A64: USRA Vd.2S, Vn.2S, #n |
ShiftRightLogicalAdd(Vector64<SByte>, Vector64<SByte>, Byte) |
uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) A32: VSRA. U8 Dd, Dm, #n A64: USRA Vd.8B, Vn.8B, #n |
ShiftRightLogicalAdd(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) A32: VSRA. U16 Dd, Dm, #n A64: USRA Vd.4H, Vn.4H, #n |
ShiftRightLogicalAdd(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) A32: VSRA. U32 Dd, Dm, #n A64: USRA Vd.2S, Vn.2S, #n |
ShiftRightLogicalAddScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) A32: VSRA. U64 Dd, Dm, #n A64: USRA Dd, Dn, #n |
ShiftRightLogicalAddScalar(Vector64<UInt64>, Vector64<UInt64>, Byte) |
uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) A32: VSRA. U64 Dd, Dm, #n A64: USRA Dd, Dn, #n |
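Example (illustrative, not part of the original reference): like its arithmetic counterpart, ShiftRightLogicalAdd folds a zero-extending shift and the addition into a single USRA instruction. A minimal C# sketch; the helper name is illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class UsraDemo
{
    // Accumulates value >> 1 (unsigned shift) into 'acc' with one instruction per vector.
    public static Vector128<uint> AccumulateHalf(Vector128<uint> acc, Vector128<uint> value)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        return AdvSimd.ShiftRightLogicalAdd(acc, value, 1);
    }
}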
ShiftRightLogicalNarrowingLower(Vector128<Int16>, Byte) |
int8x8_t vshrn_n_s16 (int16x8_t a, const int n) A32: VSHRN. I16 Dd, Qm, #n A64: SHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalNarrowingLower(Vector128<Int32>, Byte) |
int16x4_t vshrn_n_s32 (int32x4_t a, const int n) A32: VSHRN. I32 Dd, Qm, #n A64: SHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalNarrowingLower(Vector128<Int64>, Byte) |
int32x2_t vshrn_n_s64 (int64x2_t a, const int n) A32: VSHRN. I64 Dd, Qm, #n A64: SHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalNarrowingLower(Vector128<UInt16>, Byte) |
uint8x8_t vshrn_n_u16 (uint16x8_t a, const int n) A32: VSHRN. I16 Dd, Qm, #n A64: SHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalNarrowingLower(Vector128<UInt32>, Byte) |
uint16x4_t vshrn_n_u32 (uint32x4_t a, const int n) A32: VSHRN. I32 Dd, Qm, #n A64: SHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalNarrowingLower(Vector128<UInt64>, Byte) |
uint32x2_t vshrn_n_u64 (uint64x2_t a, const int n) A32: VSHRN. I64 Dd, Qm, #n A64: SHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<Int16>, Byte) |
uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n) A32: VQSHRN. U16 Dd, Qm, #n A64: UQSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<Int32>, Byte) |
uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n) A32: VQSHRN. U32 Dd, Qm, #n A64: UQSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<Int64>, Byte) |
uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n) A32: VQSHRN. U64 Dd, Qm, #n A64: UQSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<UInt16>, Byte) |
uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n) A32: VQSHRN. U16 Dd, Qm, #n A64: UQSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<UInt32>, Byte) |
uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n) A32: VQSHRN. U32 Dd, Qm, #n A64: UQSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalNarrowingSaturateLower(Vector128<UInt64>, Byte) |
uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n) A32: VQSHRN. U64 Dd, Qm, #n A64: UQSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<Byte>, Vector128<UInt16>, Byte) |
uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VQSHRN. U16 Dd+1, Qm, #n A64: UQSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VQSHRN. U32 Dd+1, Qm, #n A64: UQSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VQSHRN. U64 Dd+1, Qm, #n A64: UQSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VQSHRN. U16 Dd+1, Qm, #n A64: UQSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<UInt16>, Vector128<UInt32>, Byte) |
uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VQSHRN. U32 Dd+1, Qm, #n A64: UQSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalNarrowingSaturateUpper(Vector64<UInt32>, Vector128<UInt64>, Byte) |
uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VQSHRN. U64 Dd+1, Qm, #n A64: UQSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Byte) |
uint8x16_t vshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VSHRN. I16 Dd+1, Qm, #n A64: SHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
int16x8_t vshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) A32: VSHRN. I32 Dd+1, Qm, #n A64: SHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
int32x4_t vshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) A32: VSHRN. I64 Dd+1, Qm, #n A64: SHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
int8x16_t vshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) A32: VSHRN. I16 Dd+1, Qm, #n A64: SHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Byte) |
uint16x8_t vshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VSHRN. I32 Dd+1, Qm, #n A64: SHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Byte) |
uint32x4_t vshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VSHRN. I64 Dd+1, Qm, #n A64: SHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalRounded(Vector128<Byte>, Byte) |
uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n) A32: VRSHR. U8 Qd, Qm, #n A64: URSHR Vd.16B, Vn.16B, #n |
ShiftRightLogicalRounded(Vector128<Int16>, Byte) |
uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n) A32: VRSHR. U16 Qd, Qm, #n A64: URSHR Vd.8H, Vn.8H, #n |
ShiftRightLogicalRounded(Vector128<Int32>, Byte) |
uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n) A32: VRSHR. U32 Qd, Qm, #n A64: URSHR Vd.4S, Vn.4S, #n |
ShiftRightLogicalRounded(Vector128<Int64>, Byte) |
uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n) A32: VRSHR. U64 Qd, Qm, #n A64: URSHR Vd.2D, Vn.2D, #n |
ShiftRightLogicalRounded(Vector128<SByte>, Byte) |
uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n) A32: VRSHR. U8 Qd, Qm, #n A64: URSHR Vd.16B, Vn.16B, #n |
ShiftRightLogicalRounded(Vector128<UInt16>, Byte) |
uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n) A32: VRSHR. U16 Qd, Qm, #n A64: URSHR Vd.8H, Vn.8H, #n |
ShiftRightLogicalRounded(Vector128<UInt32>, Byte) |
uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n) A32: VRSHR. U32 Qd, Qm, #n A64: URSHR Vd.4S, Vn.4S, #n |
ShiftRightLogicalRounded(Vector128<UInt64>, Byte) |
uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n) A32: VRSHR. U64 Qd, Qm, #n A64: URSHR Vd.2D, Vn.2D, #n |
ShiftRightLogicalRounded(Vector64<Byte>, Byte) |
uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n) A32: VRSHR. U8 Dd, Dm, #n A64: URSHR Vd.8B, Vn.8B, #n |
ShiftRightLogicalRounded(Vector64<Int16>, Byte) |
uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n) A32: VRSHR. U16 Dd, Dm, #n A64: URSHR Vd.4H, Vn.4H, #n |
ShiftRightLogicalRounded(Vector64<Int32>, Byte) |
uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n) A32: VRSHR. U32 Dd, Dm, #n A64: URSHR Vd.2S, Vn.2S, #n |
ShiftRightLogicalRounded(Vector64<SByte>, Byte) |
uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n) A32: VRSHR. U8 Dd, Dm, #n A64: URSHR Vd.8B, Vn.8B, #n |
ShiftRightLogicalRounded(Vector64<UInt16>, Byte) |
uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n) A32: VRSHR. U16 Dd, Dm, #n A64: URSHR Vd.4H, Vn.4H, #n |
ShiftRightLogicalRounded(Vector64<UInt32>, Byte) |
uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n) A32: VRSHR. U32 Dd, Dm, #n A64: URSHR Vd.2S, Vn.2S, #n |
ShiftRightLogicalRoundedAdd(Vector128<Byte>, Vector128<Byte>, Byte) |
uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) A32: VRSRA. U8 Qd, Qm, #n A64: URSRA Vd.16B, Vn.16B, #n |
ShiftRightLogicalRoundedAdd(Vector128<Int16>, Vector128<Int16>, Byte) |
uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) A32: VRSRA. U16 Qd, Qm, #n A64: URSRA Vd.8H, Vn.8H, #n |
ShiftRightLogicalRoundedAdd(Vector128<Int32>, Vector128<Int32>, Byte) |
uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) A32: VRSRA. U32 Qd, Qm, #n A64: URSRA Vd.4S, Vn.4S, #n |
ShiftRightLogicalRoundedAdd(Vector128<Int64>, Vector128<Int64>, Byte) |
uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) A32: VRSRA. U64 Qd, Qm, #n A64: URSRA Vd.2D, Vn.2D, #n |
ShiftRightLogicalRoundedAdd(Vector128<SByte>, Vector128<SByte>, Byte) |
uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) A32: VRSRA. U8 Qd, Qm, #n A64: URSRA Vd.16B, Vn.16B, #n |
ShiftRightLogicalRoundedAdd(Vector128<UInt16>, Vector128<UInt16>, Byte) |
uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) A32: VRSRA. U16 Qd, Qm, #n A64: URSRA Vd.8H, Vn.8H, #n |
ShiftRightLogicalRoundedAdd(Vector128<UInt32>, Vector128<UInt32>, Byte) |
uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) A32: VRSRA. U32 Qd, Qm, #n A64: URSRA Vd.4S, Vn.4S, #n |
ShiftRightLogicalRoundedAdd(Vector128<UInt64>, Vector128<UInt64>, Byte) |
uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) A32: VRSRA. U64 Qd, Qm, #n A64: URSRA Vd.2D, Vn.2D, #n |
ShiftRightLogicalRoundedAdd(Vector64<Byte>, Vector64<Byte>, Byte) |
uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) A32: VRSRA. U8 Dd, Dm, #n A64: URSRA Vd.8B, Vn.8B, #n |
ShiftRightLogicalRoundedAdd(Vector64<Int16>, Vector64<Int16>, Byte) |
uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) A32: VRSRA. U16 Dd, Dm, #n A64: URSRA Vd.4H, Vn.4H, #n |
ShiftRightLogicalRoundedAdd(Vector64<Int32>, Vector64<Int32>, Byte) |
uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) A32: VRSRA. U32 Dd, Dm, #n A64: URSRA Vd.2S, Vn.2S, #n |
ShiftRightLogicalRoundedAdd(Vector64<SByte>, Vector64<SByte>, Byte) |
uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) A32: VRSRA. U8 Dd, Dm, #n A64: URSRA Vd.8B, Vn.8B, #n |
ShiftRightLogicalRoundedAdd(Vector64<UInt16>, Vector64<UInt16>, Byte) |
uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) A32: VRSRA. U16 Dd, Dm, #n A64: URSRA Vd.4H, Vn.4H, #n |
ShiftRightLogicalRoundedAdd(Vector64<UInt32>, Vector64<UInt32>, Byte) |
uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) A32: VRSRA. U32 Dd, Dm, #n A64: URSRA Vd.2S, Vn.2S, #n |
ShiftRightLogicalRoundedAddScalar(Vector64<Int64>, Vector64<Int64>, Byte) |
uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) A32: VRSRA. U64 Dd, Dm, #n A64: URSRA Dd, Dn, #n |
ShiftRightLogicalRoundedAddScalar(Vector64<UInt64>, Vector64<UInt64>, Byte) |
uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) A32: VRSRA. U64 Dd, Dm, #n A64: URSRA Dd, Dn, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<Int16>, Byte) |
int8x8_t vrshrn_n_s16 (int16x8_t a, const int n) A32: VRSHRN. I16 Dd, Qm, #n A64: RSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<Int32>, Byte) |
int16x4_t vrshrn_n_s32 (int32x4_t a, const int n) A32: VRSHRN. I32 Dd, Qm, #n A64: RSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<Int64>, Byte) |
int32x2_t vrshrn_n_s64 (int64x2_t a, const int n) A32: VRSHRN. I64 Dd, Qm, #n A64: RSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<UInt16>, Byte) |
uint8x8_t vrshrn_n_u16 (uint16x8_t a, const int n) A32: VRSHRN. I16 Dd, Qm, #n A64: RSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<UInt32>, Byte) |
uint16x4_t vrshrn_n_u32 (uint32x4_t a, const int n) A32: VRSHRN. I32 Dd, Qm, #n A64: RSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingLower(Vector128<UInt64>, Byte) |
uint32x2_t vrshrn_n_u64 (uint64x2_t a, const int n) A32: VRSHRN. I64 Dd, Qm, #n A64: RSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<Int16>, Byte) |
uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n) A32: VQRSHRN. U16 Dd, Qm, #n A64: UQRSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<Int32>, Byte) |
uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n) A32: VQRSHRN. U32 Dd, Qm, #n A64: UQRSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<Int64>, Byte) |
uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n) A32: VQRSHRN. U64 Dd, Qm, #n A64: UQRSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<UInt16>, Byte) |
uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n) A32: VQRSHRN. U16 Dd, Qm, #n A64: UQRSHRN Vd.8B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<UInt32>, Byte) |
uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n) A32: VQRSHRN. U32 Dd, Qm, #n A64: UQRSHRN Vd.4H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<UInt64>, Byte) |
uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n) A32: VQRSHRN. U64 Dd, Qm, #n A64: UQRSHRN Vd.2S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<Byte>, Vector128<UInt16>, Byte) |
uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VQRSHRN. U16 Dd+1, Dn, #n A64: UQRSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VQRSHRN. U32 Dd+1, Dn, #n A64: UQRSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VQRSHRN. U64 Dd+1, Dn, #n A64: UQRSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VQRSHRN. U16 Dd+1, Dn, #n A64: UQRSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<UInt16>, Vector128<UInt32>, Byte) |
uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VQRSHRN. U32 Dd+1, Dn, #n A64: UQRSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<UInt32>, Vector128<UInt64>, Byte) |
uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VQRSHRN. U64 Dd+1, Dn, #n A64: UQRSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Byte) |
uint8x16_t vrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) A32: VRSHRN. I16 Dd+1, Qm, #n A64: RSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Byte) |
int16x8_t vrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) A32: VRSHRN. I32 Dd+1, Qm, #n A64: RSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Byte) |
int32x4_t vrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) A32: VRSHRN. I64 Dd+1, Qm, #n A64: RSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Byte) |
int8x16_t vrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) A32: VRSHRN. I16 Dd+1, Qm, #n A64: RSHRN2 Vd.16B, Vn.8H, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Byte) |
uint16x8_t vrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) A32: VRSHRN. I32 Dd+1, Qm, #n A64: RSHRN2 Vd.8H, Vn.4S, #n |
ShiftRightLogicalRoundedNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Byte) |
uint32x4_t vrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) A32: VRSHRN. I64 Dd+1, Qm, #n A64: RSHRN2 Vd.4S, Vn.2D, #n |
ShiftRightLogicalRoundedScalar(Vector64<Int64>, Byte) |
uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n) A32: VRSHR. U64 Dd, Dm, #n A64: URSHR Dd, Dn, #n |
ShiftRightLogicalRoundedScalar(Vector64<UInt64>, Byte) |
uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n) A32: VRSHR. U64 Dd, Dm, #n A64: URSHR Dd, Dn, #n |
ShiftRightLogicalScalar(Vector64<Int64>, Byte) |
uint64x1_t vshr_n_u64 (uint64x1_t a, const int n) A32: VSHR. U64 Dd, Dm, #n A64: USHR Dd, Dn, #n |
ShiftRightLogicalScalar(Vector64<UInt64>, Byte) |
uint64x1_t vshr_n_u64 (uint64x1_t a, const int n) A32: VSHR. U64 Dd, Dm, #n A64: USHR Dd, Dn, #n |
SignExtendWideningLower(Vector64<Int16>) |
int32x4_t vmovl_s16 (int16x4_t a) A32: VMOVL. S16 Qd, Dm A64: SXTL Vd.4S, Vn.4H |
SignExtendWideningLower(Vector64<Int32>) |
int64x2_t vmovl_s32 (int32x2_t a) A32: VMOVL. S32 Qd, Dm A64: SXTL Vd.2D, Vn.2S |
SignExtendWideningLower(Vector64<SByte>) |
int16x8_t vmovl_s8 (int8x8_t a) A32: VMOVL. S8 Qd, Dm A64: SXTL Vd.8H, Vn.8B |
SignExtendWideningUpper(Vector128<Int16>) |
int32x4_t vmovl_high_s16 (int16x8_t a) A32: VMOVL. S16 Qd, Dm+1 A64: SXTL2 Vd.4S, Vn.8H |
SignExtendWideningUpper(Vector128<Int32>) |
int64x2_t vmovl_high_s32 (int32x4_t a) A32: VMOVL. S32 Qd, Dm+1 A64: SXTL2 Vd.2D, Vn.4S |
SignExtendWideningUpper(Vector128<SByte>) |
int16x8_t vmovl_high_s8 (int8x16_t a) A32: VMOVL. S8 Qd, Dm+1 A64: SXTL2 Vd.8H, Vn.16B |
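Example (illustrative, not part of the original reference): SignExtendWideningLower and SignExtendWideningUpper are usually paired to expand one vector of 16 signed bytes into two vectors of 8 signed 16-bit values. A minimal C# sketch; names are illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class WidenDemo
{
    // SXTL widens the low 8 bytes, SXTL2 widens the high 8 bytes.
    public static (Vector128<short> Lower, Vector128<short> Upper) Widen(Vector128<sbyte> bytes)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        return (AdvSimd.SignExtendWideningLower(bytes.GetLower()),
                AdvSimd.SignExtendWideningUpper(bytes));
    }
}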
SqrtScalar(Vector64<Double>) |
float64x1_t vsqrt_f64 (float64x1_t a) A32: VSQRT. F64 Dd, Dm A64: FSQRT Dd, Dn |
SqrtScalar(Vector64<Single>) |
float32_t vsqrts_f32 (float32_t a) A32: VSQRT. F32 Sd, Sm A64: FSQRT Sd, Sn The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Store(Byte*, Vector128<Byte>) |
void vst1q_u8 (uint8_t * ptr, uint8x16_t val) A32: VST1.8 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.16B }, [Xn] |
Store(Byte*, Vector64<Byte>) |
void vst1_u8 (uint8_t * ptr, uint8x8_t val) A32: VST1.8 { Dd }, [Rn] A64: ST1 { Vt.8B }, [Xn] |
Store(Double*, Vector128<Double>) |
void vst1q_f64 (float64_t * ptr, float64x2_t val) A32: VST1.64 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.2D }, [Xn] |
Store(Double*, Vector64<Double>) |
void vst1_f64 (float64_t * ptr, float64x1_t val) A32: VST1.64 { Dd }, [Rn] A64: ST1 { Vt.1D }, [Xn] |
Store(Int16*, Vector128<Int16>) |
void vst1q_s16 (int16_t * ptr, int16x8_t val) A32: VST1.16 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.8H }, [Xn] |
Store(Int16*, Vector64<Int16>) |
void vst1_s16 (int16_t * ptr, int16x4_t val) A32: VST1.16 { Dd }, [Rn] A64: ST1 { Vt.4H }, [Xn] |
Store(Int32*, Vector128<Int32>) |
void vst1q_s32 (int32_t * ptr, int32x4_t val) A32: VST1.32 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.4S }, [Xn] |
Store(Int32*, Vector64<Int32>) |
void vst1_s32 (int32_t * ptr, int32x2_t val) A32: VST1.32 { Dd }, [Rn] A64: ST1 { Vt.2S }, [Xn] |
Store(Int64*, Vector128<Int64>) |
void vst1q_s64 (int64_t * ptr, int64x2_t val) A32: VST1.64 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.2D }, [Xn] |
Store(Int64*, Vector64<Int64>) |
void vst1_s64 (int64_t * ptr, int64x1_t val) A32: VST1.64 { Dd }, [Rn] A64: ST1 { Vt.1D }, [Xn] |
Store(SByte*, Vector128<SByte>) |
void vst1q_s8 (int8_t * ptr, int8x16_t val) A32: VST1.8 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.16B }, [Xn] |
Store(SByte*, Vector64<SByte>) |
void vst1_s8 (int8_t * ptr, int8x8_t val) A32: VST1.8 { Dd }, [Rn] A64: ST1 { Vt.8B }, [Xn] |
Store(Single*, Vector128<Single>) |
void vst1q_f32 (float32_t * ptr, float32x4_t val) A32: VST1.32 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.4S }, [Xn] |
Store(Single*, Vector64<Single>) |
void vst1_f32 (float32_t * ptr, float32x2_t val) A32: VST1.32 { Dd }, [Rn] A64: ST1 { Vt.2S }, [Xn] |
Store(UInt16*, Vector128<UInt16>) |
void vst1q_u16 (uint16_t * ptr, uint16x8_t val) A32: VST1.16 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.8H }, [Xn] |
Store(UInt16*, Vector64<UInt16>) |
void vst1_u16 (uint16_t * ptr, uint16x4_t val) A32: VST1.16 { Dd }, [Rn] A64: ST1 { Vt.4H }, [Xn] |
Store(UInt32*, Vector128<UInt32>) |
void vst1q_u32 (uint32_t * ptr, uint32x4_t val) A32: VST1.32 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.4S }, [Xn] |
Store(UInt32*, Vector64<UInt32>) |
void vst1_u32 (uint32_t * ptr, uint32x2_t val) A32: VST1.32 { Dd }, [Rn] A64: ST1 { Vt.2S }, [Xn] |
Store(UInt64*, Vector128<UInt64>) |
void vst1q_u64 (uint64_t * ptr, uint64x2_t val) A32: VST1.64 { Dd, Dd+1 }, [Rn] A64: ST1 { Vt.2D }, [Xn] |
Store(UInt64*, Vector64<UInt64>) |
void vst1_u64 (uint64_t * ptr, uint64x1_t val) A32: VST1.64 { Dd }, [Rn] A64: ST1 { Vt.1D }, [Xn] |
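Example (illustrative, not part of the original reference): the Store overloads write the full vector to memory through a pointer, so managed callers typically pin the destination first. A minimal unsafe C# sketch; names are illustrative.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class StoreDemo
{
    // Writes all four Int32 lanes to destination[offset..offset+4) with a single ST1.
    public static unsafe void StoreAt(int[] destination, int offset, Vector128<int> value)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        fixed (int* p = &destination[offset])
        {
            AdvSimd.Store(p, value);
        }
    }
}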
StoreSelectedScalar(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Byte*, Vector128<Byte>, Byte) |
void vst1q_lane_u8 (uint8_t * ptr, uint8x16_t val, const int lane) A32: VST1.8 { Dd[index] }, [Rn] A64: ST1 { Vt.B }[index], [Xn] |
StoreSelectedScalar(Byte*, Vector64<Byte>, Byte) |
void vst1_lane_u8 (uint8_t * ptr, uint8x8_t val, const int lane) A32: VST1.8 { Dd[index] }, [Rn] A64: ST1 { Vt.B }[index], [Xn] |
StoreSelectedScalar(Double*, Vector128<Double>, Byte) |
void vst1q_lane_f64 (float64_t * ptr, float64x2_t val, const int lane) A32: VSTR.64 Dd, [Rn] A64: ST1 { Vt.D }[index], [Xn] |
StoreSelectedScalar(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int16*, Vector128<Int16>, Byte) |
void vst1q_lane_s16 (int16_t * ptr, int16x8_t val, const int lane) A32: VST1.16 { Dd[index] }, [Rn] A64: ST1 { Vt.H }[index], [Xn] |
StoreSelectedScalar(Int16*, Vector64<Int16>, Byte) |
void vst1_lane_s16 (int16_t * ptr, int16x4_t val, const int lane) A32: VST1.16 { Dd[index] }, [Rn] A64: ST1 { Vt.H }[index], [Xn] |
StoreSelectedScalar(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Int32*, Vector128<Int32>, Byte) |
void vst1q_lane_s32 (int32_t * ptr, int32x4_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(Int32*, Vector64<Int32>, Byte) |
void vst1_lane_s32 (int32_t * ptr, int32x2_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(Int64*, Vector128<Int64>, Byte) |
void vst1q_lane_s64 (int64_t * ptr, int64x2_t val, const int lane) A32: VSTR.64 Dd, [Rn] A64: ST1 { Vt.D }[index], [Xn] |
StoreSelectedScalar(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(SByte*, Vector128<SByte>, Byte) |
void vst1q_lane_s8 (int8_t * ptr, int8x16_t val, const int lane) A32: VST1.8 { Dd[index] }, [Rn] A64: ST1 { Vt.B }[index], [Xn] |
StoreSelectedScalar(SByte*, Vector64<SByte>, Byte) |
void vst1_lane_s8 (int8_t * ptr, int8x8_t val, const int lane) A32: VST1.8 { Dd[index] }, [Rn] A64: ST1 { Vt.B }[index], [Xn] |
StoreSelectedScalar(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>,Vector64<Single>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Single*, ValueTuple<Vector64<Single>,Vector64<Single>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(Single*, Vector128<Single>, Byte) |
void vst1q_lane_f32 (float32_t * ptr, float32x4_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(Single*, Vector64<Single>, Byte) |
void vst1_lane_f32 (float32_t * ptr, float32x2_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt16*, Vector128<UInt16>, Byte) |
void vst1q_lane_u16 (uint16_t * ptr, uint16x8_t val, const int lane) A32: VST1.16 { Dd[index] }, [Rn] A64: ST1 { Vt.H }[index], [Xn] |
StoreSelectedScalar(UInt16*, Vector64<UInt16>, Byte) |
void vst1_lane_u16 (uint16_t * ptr, uint16x4_t val, const int lane) A32: VST1.16 { Dd[index] }, [Rn] A64: ST1 { Vt.H }[index], [Xn] |
StoreSelectedScalar(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>>, Byte) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreSelectedScalar(UInt32*, Vector128<UInt32>, Byte) |
void vst1q_lane_u32 (uint32_t * ptr, uint32x4_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(UInt32*, Vector64<UInt32>, Byte) |
void vst1_lane_u32 (uint32_t * ptr, uint32x2_t val, const int lane) A32: VST1.32 { Dd[index] }, [Rn] A64: ST1 { Vt.S }[index], [Xn] |
StoreSelectedScalar(UInt64*, Vector128<UInt64>, Byte) |
void vst1q_lane_u64 (uint64_t * ptr, uint64x2_t val, const int lane) A32: VSTR.64 Dd, [Rn] A64: ST1 { Vt.D }[index], [Xn] |
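Example (illustrative, not part of the original reference): StoreSelectedScalar writes exactly one lane to memory and leaves the neighbouring bytes untouched, which is handy for scattering results. A minimal unsafe C# sketch; names are illustrative and the lane index must be a constant.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
static class LaneStoreDemo
{
    // Stores only lane 2 of 'value' to *destination (ST1 { Vt.S }[2]).
    public static unsafe void StoreThirdLane(uint* destination, Vector128<uint> value)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();
        AdvSimd.StoreSelectedScalar(destination, value, 2);
    }
}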
StoreVector64x2(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(Single*, ValueTuple<Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(Single*, ValueTuple<Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x2AndZip(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x3AndZip(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(Byte*, ValueTuple<Vector64<Byte>,Vector64<Byte>,Vector64<Byte>,Vector64<Byte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(Int16*, ValueTuple<Vector64<Int16>,Vector64<Int16>,Vector64<Int16>,Vector64<Int16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(Int32*, ValueTuple<Vector64<Int32>,Vector64<Int32>,Vector64<Int32>,Vector64<Int32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(SByte*, ValueTuple<Vector64<SByte>,Vector64<SByte>,Vector64<SByte>,Vector64<SByte>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(Single*, ValueTuple<Vector64<Single>,Vector64<Single>,Vector64<Single>,Vector64<Single>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(UInt16*, ValueTuple<Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>,Vector64<UInt16>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
StoreVector64x4AndZip(UInt32*, ValueTuple<Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>,Vector64<UInt32>>) |
This class provides access to the ARM AdvSIMD hardware instructions via intrinsics. |
Subtract(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vsubq_u8 (uint8x16_t a, uint8x16_t b) A32: VSUB. I8 Qd, Qn, Qm A64: SUB Vd.16B, Vn.16B, Vm.16B |
Subtract(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vsubq_s16 (int16x8_t a, int16x8_t b) A32: VSUB. I16 Qd, Qn, Qm A64: SUB Vd.8H, Vn.8H, Vm.8H |
Subtract(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vsubq_s32 (int32x4_t a, int32x4_t b) A32: VSUB. I32 Qd, Qn, Qm A64: SUB Vd.4S, Vn.4S, Vm.4S |
Subtract(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vsubq_s64 (int64x2_t a, int64x2_t b) A32: VSUB. I64 Qd, Qn, Qm A64: SUB Vd.2D, Vn.2D, Vm.2D |
Subtract(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vsubq_s8 (int8x16_t a, int8x16_t b) A32: VSUB. I8 Qd, Qn, Qm A64: SUB Vd.16B, Vn.16B, Vm.16B |
Subtract(Vector128<Single>, Vector128<Single>) |
float32x4_t vsubq_f32 (float32x4_t a, float32x4_t b) A32: VSUB. F32 Qd, Qn, Qm A64: FSUB Vd.4S, Vn.4S, Vm.4S |
Subtract(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vsubq_u16 (uint16x8_t a, uint16x8_t b) A32: VSUB. I16 Qd, Qn, Qm A64: SUB Vd.8H, Vn.8H, Vm.8H |
Subtract(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vsubq_u32 (uint32x4_t a, uint32x4_t b) A32: VSUB. I32 Qd, Qn, Qm A64: SUB Vd.4S, Vn.4S, Vm.4S |
Subtract(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vsubq_u64 (uint64x2_t a, uint64x2_t b) A32: VSUB. I64 Qd, Qn, Qm A64: SUB Vd.2D, Vn.2D, Vm.2D |
Subtract(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) A32: VSUB. I8 Dd, Dn, Dm A64: SUB Vd.8B, Vn.8B, Vm.8B |
Subtract(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vsub_s16 (int16x4_t a, int16x4_t b) A32: VSUB. I16 Dd, Dn, Dm A64: SUB Vd.4H, Vn.4H, Vm.4H |
Subtract(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vsub_s32 (int32x2_t a, int32x2_t b) A32: VSUB. I32 Dd, Dn, Dm A64: SUB Vd.2S, Vn.2S, Vm.2S |
Subtract(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vsub_s8 (int8x8_t a, int8x8_t b) A32: VSUB. I8 Dd, Dn, Dm A64: SUB Vd.8B, Vn.8B, Vm.8B |
Subtract(Vector64<Single>, Vector64<Single>) |
float32x2_t vsub_f32 (float32x2_t a, float32x2_t b) A32: VSUB. F32 Dd, Dn, Dm A64: FSUB Vd.2S, Vn.2S, Vm.2S |
Subtract(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vsub_u16 (uint16x4_t a, uint16x4_t b) A32: VSUB. I16 Dd, Dn, Dm A64: SUB Vd.4H, Vn.4H, Vm.4H |
Subtract(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vsub_u32 (uint32x2_t a, uint32x2_t b) A32: VSUB. I32 Dd, Dn, Dm A64: SUB Vd.2S, Vn.2S, Vm.2S |
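A minimal C# sketch of the element-wise Subtract overloads above, guarded by IsSupported (values are illustrative):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<int> a = Vector128.Create(10, 20, 30, 40);
    Vector128<int> b = Vector128.Create(1, 2, 3, 4);

    // Element-wise difference <9, 18, 27, 36>; compiles to SUB Vd.4S (A64) / VSUB.I32 (A32).
    Vector128<int> diff = AdvSimd.Subtract(a, b);
}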
SubtractHighNarrowingLower(Vector128<Int16>, Vector128<Int16>) |
int8x8_t vsubhn_s16 (int16x8_t a, int16x8_t b) A32: VSUBHN. I16 Dd, Qn, Qm A64: SUBHN Vd.8B, Vn.8H, Vm.8H |
SubtractHighNarrowingLower(Vector128<Int32>, Vector128<Int32>) |
int16x4_t vsubhn_s32 (int32x4_t a, int32x4_t b) A32: VSUBHN. I32 Dd, Qn, Qm A64: SUBHN Vd.4H, Vn.4S, Vm.4S |
SubtractHighNarrowingLower(Vector128<Int64>, Vector128<Int64>) |
int32x2_t vsubhn_s64 (int64x2_t a, int64x2_t b) A32: VSUBHN. I64 Dd, Qn, Qm A64: SUBHN Vd.2S, Vn.2D, Vm.2D |
SubtractHighNarrowingLower(Vector128<UInt16>, Vector128<UInt16>) |
uint8x8_t vsubhn_u16 (uint16x8_t a, uint16x8_t b) A32: VSUBHN. I16 Dd, Qn, Qm A64: SUBHN Vd.8B, Vn.8H, Vm.8H |
SubtractHighNarrowingLower(Vector128<UInt32>, Vector128<UInt32>) |
uint16x4_t vsubhn_u32 (uint32x4_t a, uint32x4_t b) A32: VSUBHN. I32 Dd, Qn, Qm A64: SUBHN Vd.4H, Vn.4S, Vm.4S |
SubtractHighNarrowingLower(Vector128<UInt64>, Vector128<UInt64>) |
uint32x2_t vsubhn_u64 (uint64x2_t a, uint64x2_t b) A32: VSUBHN. I64 Dd, Qn, Qm A64: SUBHN Vd.2S, Vn.2D, Vm.2D |
SubtractHighNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Vector128<UInt16>) |
uint8x16_t vsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) A32: VSUBHN. I16 Dd+1, Qn, Qm A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H |
SubtractHighNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>) |
int16x8_t vsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) A32: VSUBHN. I32 Dd+1, Qn, Qm A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S |
SubtractHighNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Vector128<Int64>) |
int32x4_t vsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) A32: VSUBHN. I64 Dd+1, Qn, Qm A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D |
SubtractHighNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Vector128<Int16>) |
int8x16_t vsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) A32: VSUBHN. I16 Dd+1, Qn, Qm A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H |
SubtractHighNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Vector128<UInt32>) |
uint16x8_t vsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) A32: VSUBHN. I32 Dd+1, Qn, Qm A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S |
SubtractHighNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Vector128<UInt64>) |
uint32x4_t vsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) A32: VSUBHN. I64 Dd+1, Qn, Qm A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D |
SubtractRoundedHighNarrowingLower(Vector128<Int16>, Vector128<Int16>) |
int8x8_t vrsubhn_s16 (int16x8_t a, int16x8_t b) A32: VRSUBHN. I16 Dd, Qn, Qm A64: RSUBHN Vd.8B, Vn.8H, Vm.8H |
SubtractRoundedHighNarrowingLower(Vector128<Int32>, Vector128<Int32>) |
int16x4_t vrsubhn_s32 (int32x4_t a, int32x4_t b) A32: VRSUBHN. I32 Dd, Qn, Qm A64: RSUBHN Vd.4H, Vn.4S, Vm.4S |
SubtractRoundedHighNarrowingLower(Vector128<Int64>, Vector128<Int64>) |
int32x2_t vrsubhn_s64 (int64x2_t a, int64x2_t b) A32: VRSUBHN. I64 Dd, Qn, Qm A64: RSUBHN Vd.2S, Vn.2D, Vm.2D |
SubtractRoundedHighNarrowingLower(Vector128<UInt16>, Vector128<UInt16>) |
uint8x8_t vrsubhn_u16 (uint16x8_t a, uint16x8_t b) A32: VRSUBHN. I16 Dd, Qn, Qm A64: RSUBHN Vd.8B, Vn.8H, Vm.8H |
SubtractRoundedHighNarrowingLower(Vector128<UInt32>, Vector128<UInt32>) |
uint16x4_t vrsubhn_u32 (uint32x4_t a, uint32x4_t b) A32: VRSUBHN. I32 Dd, Qn, Qm A64: RSUBHN Vd.4H, Vn.4S, Vm.4S |
SubtractRoundedHighNarrowingLower(Vector128<UInt64>, Vector128<UInt64>) |
uint32x2_t vrsubhn_u64 (uint64x2_t a, uint64x2_t b) A32: VRSUBHN. I64 Dd, Qn, Qm A64: RSUBHN Vd.2S, Vn.2D, Vm.2D |
SubtractRoundedHighNarrowingUpper(Vector64<Byte>, Vector128<UInt16>, Vector128<UInt16>) |
uint8x16_t vrsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) A32: VRSUBHN. I16 Dd+1, Qn, Qm A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H |
SubtractRoundedHighNarrowingUpper(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>) |
int16x8_t vrsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) A32: VRSUBHN. I32 Dd+1, Qn, Qm A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S |
SubtractRoundedHighNarrowingUpper(Vector64<Int32>, Vector128<Int64>, Vector128<Int64>) |
int32x4_t vrsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) A32: VRSUBHN. I64 Dd+1, Qn, Qm A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D |
SubtractRoundedHighNarrowingUpper(Vector64<SByte>, Vector128<Int16>, Vector128<Int16>) |
int8x16_t vrsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) A32: VRSUBHN. I16 Dd+1, Qn, Qm A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H |
SubtractRoundedHighNarrowingUpper(Vector64<UInt16>, Vector128<UInt32>, Vector128<UInt32>) |
uint16x8_t vrsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) A32: VRSUBHN. I32 Dd+1, Qn, Qm A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S |
SubtractRoundedHighNarrowingUpper(Vector64<UInt32>, Vector128<UInt64>, Vector128<UInt64>) |
uint32x4_t vrsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) A32: VRSUBHN. I64 Dd+1, Qn, Qm A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D |
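The high-narrowing overloads above compute a - b per lane and keep only the upper half of each difference; the Rounded forms add a rounding bias first, and the Upper forms pack a second narrowed result above an existing 64-bit one. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<uint> a = Vector128.Create(0x12345678u, 0x20000000u, 0x30000000u, 0x40000000u);
    Vector128<uint> b = Vector128.Create(0x00005678u, 0x10000000u, 0x10000000u, 0x10000000u);

    // Each ushort lane is the high half of (a[i] - b[i]); first lane: (0x12345678 - 0x00005678) >> 16 = 0x1234.
    Vector64<ushort> low = AdvSimd.SubtractHighNarrowingLower(a, b);

    // The Upper form narrows a second pair and places it above an existing 64-bit result.
    Vector128<ushort> packed = AdvSimd.SubtractHighNarrowingUpper(low, a, b);

    // The Rounded variants add half of the discarded low part before taking the high half.
    Vector64<ushort> rounded = AdvSimd.SubtractRoundedHighNarrowingLower(a, b);
}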
SubtractSaturate(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t vqsubq_u8 (uint8x16_t a, uint8x16_t b) A32: VQSUB. U8 Qd, Qn, Qm A64: UQSUB Vd.16B, Vn.16B, Vm.16B |
SubtractSaturate(Vector128<Int16>, Vector128<Int16>) |
int16x8_t vqsubq_s16 (int16x8_t a, int16x8_t b) A32: VQSUB. S16 Qd, Qn, Qm A64: SQSUB Vd.8H, Vn.8H, Vm.8H |
SubtractSaturate(Vector128<Int32>, Vector128<Int32>) |
int32x4_t vqsubq_s32 (int32x4_t a, int32x4_t b) A32: VQSUB. S32 Qd, Qn, Qm A64: SQSUB Vd.4S, Vn.4S, Vm.4S |
SubtractSaturate(Vector128<Int64>, Vector128<Int64>) |
int64x2_t vqsubq_s64 (int64x2_t a, int64x2_t b) A32: VQSUB. S64 Qd, Qn, Qm A64: SQSUB Vd.2D, Vn.2D, Vm.2D |
SubtractSaturate(Vector128<SByte>, Vector128<SByte>) |
int8x16_t vqsubq_s8 (int8x16_t a, int8x16_t b) A32: VQSUB. S8 Qd, Qn, Qm A64: SQSUB Vd.16B, Vn.16B, Vm.16B |
SubtractSaturate(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t vqsubq_u16 (uint16x8_t a, uint16x8_t b) A32: VQSUB. U16 Qd, Qn, Qm A64: UQSUB Vd.8H, Vn.8H, Vm.8H |
SubtractSaturate(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t vqsubq_u32 (uint32x4_t a, uint32x4_t b) A32: VQSUB. U32 Qd, Qn, Qm A64: UQSUB Vd.4S, Vn.4S, Vm.4S |
SubtractSaturate(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t vqsubq_u64 (uint64x2_t a, uint64x2_t b) A32: VQSUB. U64 Qd, Qn, Qm A64: UQSUB Vd.2D, Vn.2D, Vm.2D |
SubtractSaturate(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t vqsub_u8 (uint8x8_t a, uint8x8_t b) A32: VQSUB. U8 Dd, Dn, Dm A64: UQSUB Vd.8B, Vn.8B, Vm.8B |
SubtractSaturate(Vector64<Int16>, Vector64<Int16>) |
int16x4_t vqsub_s16 (int16x4_t a, int16x4_t b) A32: VQSUB. S16 Dd, Dn, Dm A64: SQSUB Vd.4H, Vn.4H, Vm.4H |
SubtractSaturate(Vector64<Int32>, Vector64<Int32>) |
int32x2_t vqsub_s32 (int32x2_t a, int32x2_t b) A32: VQSUB. S32 Dd, Dn, Dm A64: SQSUB Vd.2S, Vn.2S, Vm.2S |
SubtractSaturate(Vector64<SByte>, Vector64<SByte>) |
int8x8_t vqsub_s8 (int8x8_t a, int8x8_t b) A32: VQSUB. S8 Dd, Dn, Dm A64: SQSUB Vd.8B, Vn.8B, Vm.8B |
SubtractSaturate(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t vqsub_u16 (uint16x4_t a, uint16x4_t b) A32: VQSUB. U16 Dd, Dn, Dm A64: UQSUB Vd.4H, Vn.4H, Vm.4H |
SubtractSaturate(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t vqsub_u32 (uint32x2_t a, uint32x2_t b) A32: VQSUB. U32 Dd, Dn, Dm A64: UQSUB Vd.2S, Vn.2S, Vm.2S |
SubtractSaturateScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vqsub_s64 (int64x1_t a, int64x1_t b) A32: VQSUB. S64 Dd, Dn, Dm A64: SQSUB Dd, Dn, Dm |
SubtractSaturateScalar(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vqsub_u64 (uint64x1_t a, uint64x1_t b) A32: VQSUB. U64 Dd, Dn, Dm A64: UQSUB Dd, Dn, Dm |
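Saturating subtraction clamps each lane to the element type's range instead of wrapping. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector64<byte> a = Vector64.Create((byte)10, 200, 30, 40, 50, 60, 70, 80);
    Vector64<byte> b = Vector64.Create((byte)20, 100, 5, 5, 5, 5, 5, 5);

    // Unsigned saturation: 10 - 20 clamps to 0 instead of wrapping to 246.
    Vector64<byte> saturated = AdvSimd.SubtractSaturate(a, b); // <0, 100, 25, 35, 45, 55, 65, 75>
    Vector64<byte> wrapped   = AdvSimd.Subtract(a, b);         // <246, 100, 25, 35, 45, 55, 65, 75>
}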
SubtractScalar(Vector64<Double>, Vector64<Double>) |
float64x1_t vsub_f64 (float64x1_t a, float64x1_t b) A32: VSUB. F64 Dd, Dn, Dm A64: FSUB Dd, Dn, Dm |
SubtractScalar(Vector64<Int64>, Vector64<Int64>) |
int64x1_t vsub_s64 (int64x1_t a, int64x1_t b) A32: VSUB. I64 Dd, Dn, Dm A64: SUB Dd, Dn, Dm |
SubtractScalar(Vector64<Single>, Vector64<Single>) |
float32_t vsubs_f32 (float32_t a, float32_t b) A32: VSUB. F32 Sd, Sn, Sm A64: FSUB Sd, Sn, Sm The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
SubtractScalar(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t vsub_u64 (uint64x1_t a, uint64x1_t b) A32: VSUB. I64 Dd, Dn, Dm A64: SUB Dd, Dn, Dm |
SubtractWideningLower(Vector128<Int16>, Vector64<SByte>) |
int16x8_t vsubw_s8 (int16x8_t a, int8x8_t b) A32: VSUBW. S8 Qd, Qn, Dm A64: SSUBW Vd.8H, Vn.8H, Vm.8B |
SubtractWideningLower(Vector128<Int32>, Vector64<Int16>) |
int32x4_t vsubw_s16 (int32x4_t a, int16x4_t b) A32: VSUBW. S16 Qd, Qn, Dm A64: SSUBW Vd.4S, Vn.4S, Vm.4H |
SubtractWideningLower(Vector128<Int64>, Vector64<Int32>) |
int64x2_t vsubw_s32 (int64x2_t a, int32x2_t b) A32: VSUBW. S32 Qd, Qn, Dm A64: SSUBW Vd.2D, Vn.2D, Vm.2S |
SubtractWideningLower(Vector128<UInt16>, Vector64<Byte>) |
uint16x8_t vsubw_u8 (uint16x8_t a, uint8x8_t b) A32: VSUBW. U8 Qd, Qn, Dm A64: USUBW Vd.8H, Vn.8H, Vm.8B |
SubtractWideningLower(Vector128<UInt32>, Vector64<UInt16>) |
uint32x4_t vsubw_u16 (uint32x4_t a, uint16x4_t b) A32: VSUBW. U16 Qd, Qn, Dm A64: USUBW Vd.4S, Vn.4S, Vm.4H |
SubtractWideningLower(Vector128<UInt64>, Vector64<UInt32>) |
uint64x2_t vsubw_u32 (uint64x2_t a, uint32x2_t b) A32: VSUBW. U32 Qd, Qn, Dm A64: USUBW Vd.2D, Vn.2D, Vm.2S |
SubtractWideningLower(Vector64<Byte>, Vector64<Byte>) |
uint16x8_t vsubl_u8 (uint8x8_t a, uint8x8_t b) A32: VSUBL. U8 Qd, Dn, Dm A64: USUBL Vd.8H, Vn.8B, Vm.8B |
SubtractWideningLower(Vector64<Int16>, Vector64<Int16>) |
int32x4_t vsubl_s16 (int16x4_t a, int16x4_t b) A32: VSUBL. S16 Qd, Dn, Dm A64: SSUBL Vd.4S, Vn.4H, Vm.4H |
SubtractWideningLower(Vector64<Int32>, Vector64<Int32>) |
int64x2_t vsubl_s32 (int32x2_t a, int32x2_t b) A32: VSUBL. S32 Qd, Dn, Dm A64: SSUBL Vd.2D, Vn.2S, Vm.2S |
SubtractWideningLower(Vector64<SByte>, Vector64<SByte>) |
int16x8_t vsubl_s8 (int8x8_t a, int8x8_t b) A32: VSUBL. S8 Qd, Dn, Dm A64: SSUBL Vd.8H, Vn.8B, Vm.8B |
SubtractWideningLower(Vector64<UInt16>, Vector64<UInt16>) |
uint32x4_t vsubl_u16 (uint16x4_t a, uint16x4_t b) A32: VSUBL. U16 Qd, Dn, Dm A64: USUBL Vd.4S, Vn.4H, Vm.4H |
SubtractWideningLower(Vector64<UInt32>, Vector64<UInt32>) |
uint64x2_t vsubl_u32 (uint32x2_t a, uint32x2_t b) A32: VSUBL. U32 Qd, Dn, Dm A64: USUBL Vd.2D, Vn.2S, Vm.2S |
SubtractWideningUpper(Vector128<Byte>, Vector128<Byte>) |
uint16x8_t vsubl_high_u8 (uint8x16_t a, uint8x16_t b) A32: VSUBL. U8 Qd, Dn+1, Dm+1 A64: USUBL2 Vd.8H, Vn.16B, Vm.16B |
SubtractWideningUpper(Vector128<Int16>, Vector128<Int16>) |
int32x4_t vsubl_high_s16 (int16x8_t a, int16x8_t b) A32: VSUBL. S16 Qd, Dn+1, Dm+1 A64: SSUBL2 Vd.4S, Vn.8H, Vm.8H |
SubtractWideningUpper(Vector128<Int16>, Vector128<SByte>) |
int16x8_t vsubw_high_s8 (int16x8_t a, int8x16_t b) A32: VSUBW. S8 Qd, Qn, Dm+1 A64: SSUBW2 Vd.8H, Vn.8H, Vm.16B |
SubtractWideningUpper(Vector128<Int32>, Vector128<Int16>) |
int32x4_t vsubw_high_s16 (int32x4_t a, int16x8_t b) A32: VSUBW. S16 Qd, Qn, Dm+1 A64: SSUBW2 Vd.4S, Vn.4S, Vm.8H |
SubtractWideningUpper(Vector128<Int32>, Vector128<Int32>) |
int64x2_t vsubl_high_s32 (int32x4_t a, int32x4_t b) A32: VSUBL. S32 Qd, Dn+1, Dm+1 A64: SSUBL2 Vd.2D, Vn.4S, Vm.4S |
SubtractWideningUpper(Vector128<Int64>, Vector128<Int32>) |
int64x2_t vsubw_high_s32 (int64x2_t a, int32x4_t b) A32: VSUBW. S32 Qd, Qn, Dm+1 A64: SSUBW2 Vd.2D, Vn.2D, Vm.4S |
SubtractWideningUpper(Vector128<SByte>, Vector128<SByte>) |
int16x8_t vsubl_high_s8 (int8x16_t a, int8x16_t b) A32: VSUBL. S8 Qd, Dn+1, Dm+1 A64: SSUBL2 Vd.8H, Vn.16B, Vm.16B |
SubtractWideningUpper(Vector128<UInt16>, Vector128<Byte>) |
uint16x8_t vsubw_high_u8 (uint16x8_t a, uint8x16_t b) A32: VSUBW. U8 Qd, Qn, Dm+1 A64: USUBW2 Vd.8H, Vn.8H, Vm.16B |
SubtractWideningUpper(Vector128<UInt16>, Vector128<UInt16>) |
uint32x4_t vsubl_high_u16 (uint16x8_t a, uint16x8_t b) A32: VSUBL. U16 Qd, Dn+1, Dm+1 A64: USUBL2 Vd.4S, Vn.8H, Vm.8H |
SubtractWideningUpper(Vector128<UInt32>, Vector128<UInt16>) |
uint32x4_t vsubw_high_u16 (uint32x4_t a, uint16x8_t b) A32: VSUBW. U16 Qd, Qn, Dm+1 A64: USUBW2 Vd.4S, Vn.4S, Vm.8H |
SubtractWideningUpper(Vector128<UInt32>, Vector128<UInt32>) |
uint64x2_t vsubl_high_u32 (uint32x4_t a, uint32x4_t b) A32: VSUBL. U32 Qd, Dn+1, Dm+1 A64: USUBL2 Vd.2D, Vn.4S, Vm.4S |
SubtractWideningUpper(Vector128<UInt64>, Vector128<UInt32>) |
uint64x2_t vsubw_high_u32 (uint64x2_t a, uint32x4_t b) A32: VSUBW. U32 Qd, Qn, Dm+1 A64: USUBW2 Vd.2D, Vn.2D, Vm.4S |
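The widening overloads above promote each lane to double width before subtracting; the Lower forms read 64-bit narrow sources, the Upper forms the high halves of 128-bit sources. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector64<byte> a = Vector64.Create((byte)200, 10, 20, 30, 40, 50, 60, 70);
    Vector64<byte> b = Vector64.Create((byte)10, 20, 5, 5, 5, 5, 5, 5);

    // Lanes are zero-extended to 16 bits first: 200 - 10 = 190, while 10 - 20 wraps modulo 2^16 to 65526.
    Vector128<ushort> lower = AdvSimd.SubtractWideningLower(a, b);

    // The Upper forms take 128-bit sources and subtract their high eight bytes instead.
    Vector128<byte> a128 = Vector128.Create((byte)9);
    Vector128<byte> b128 = Vector128.Create((byte)4);
    Vector128<ushort> upper = AdvSimd.SubtractWideningUpper(a128, b128);
}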
ToString() |
Returns a string that represents the current object. (Inherited from Object) |
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbl4_u8(uint8x16x4_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B |
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbl3_u8(uint8x16x3_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B |
VectorTableLookup(ValueTuple<Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbl2_u8(uint8x16x2_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B |
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbl4_s8(int8x16x4_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B |
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbl3_s8(int8x16x3_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B |
VectorTableLookup(ValueTuple<Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbl2_s8(int8x16x2_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B |
VectorTableLookup(Vector128<Byte>, Vector64<Byte>) |
uint8x8_t vqtbl1_u8(uint8x16_t t, uint8x8_t idx) A32: VTBL Dd, {Dn, Dn+1}, Dm A64: TBL Vd.8B, {Vn.16B}, Vm.8B |
VectorTableLookup(Vector128<SByte>, Vector64<SByte>) |
int8x8_t vqtbl1_s8(int8x16_t t, uint8x8_t idx) A32: VTBL Dd, {Dn, Dn+1}, Dm A64: TBL Vd.8B, {Vn.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbx4_u8(uint8x8_t r, uint8x16x4_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbx3_u8(uint8x8_t r, uint8x16x3_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<Byte>, ValueTuple<Vector128<Byte>,Vector128<Byte>>, Vector64<Byte>) |
uint8x8_t vqtbx2_u8(uint8x8_t r, uint8x16x2_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<Byte>, Vector128<Byte>, Vector64<Byte>) |
uint8x8_t vqtbx1_u8(uint8x8_t r, uint8x16_t t, uint8x8_t idx) A32: VTBX Dd, {Dn, Dn+1}, Dm A64: TBX Vd.8B, {Vn.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbx4_s8(int8x8_t r, int8x16x4_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbx3_s8(int8x8_t r, int8x16x3_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<SByte>, ValueTuple<Vector128<SByte>,Vector128<SByte>>, Vector64<SByte>) |
int8x8_t vqtbx2_s8(int8x8_t r, int8x16x2_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B |
VectorTableLookupExtension(Vector64<SByte>, Vector128<SByte>, Vector64<SByte>) |
int8x8_t vqtbx1_s8(int8x8_t r, int8x16_t t, uint8x8_t idx) A32: VTBX Dd, {Dn, Dn+1}, Dm A64: TBX Vd.8B, {Vn.16B}, Vm.8B |
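VectorTableLookup selects bytes from a 16-byte table by index, producing zero for out-of-range indices; VectorTableLookupExtension keeps the corresponding byte of its first argument instead. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<byte> table = Vector128.Create((byte)0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    Vector64<byte> indices = Vector64.Create((byte)7, 6, 5, 4, 3, 2, 1, 0);

    // Picks table[indices[i]] per lane: <7, 6, 5, 4, 3, 2, 1, 0>; indices >= 16 would produce 0.
    Vector64<byte> reversed = AdvSimd.VectorTableLookup(table, indices);

    // The Extension form keeps the matching lane of its first argument for out-of-range indices.
    Vector64<byte> fallback = Vector64.Create((byte)0xFF);
    Vector64<byte> merged = AdvSimd.VectorTableLookupExtension(
        fallback, table, Vector64.Create((byte)0, 99, 1, 99, 2, 99, 3, 99));
    // merged = <0, 255, 1, 255, 2, 255, 3, 255>
}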
Xor(Vector128<Byte>, Vector128<Byte>) |
uint8x16_t veorq_u8 (uint8x16_t a, uint8x16_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<Double>, Vector128<Double>) |
float64x2_t veorq_f64 (float64x2_t a, float64x2_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Xor(Vector128<Int16>, Vector128<Int16>) |
int16x8_t veorq_s16 (int16x8_t a, int16x8_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<Int32>, Vector128<Int32>) |
int32x4_t veorq_s32 (int32x4_t a, int32x4_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<Int64>, Vector128<Int64>) |
int64x2_t veorq_s64 (int64x2_t a, int64x2_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<SByte>, Vector128<SByte>) |
int8x16_t veorq_s8 (int8x16_t a, int8x16_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<Single>, Vector128<Single>) |
float32x4_t veorq_f32 (float32x4_t a, float32x4_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Xor(Vector128<UInt16>, Vector128<UInt16>) |
uint16x8_t veorq_u16 (uint16x8_t a, uint16x8_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<UInt32>, Vector128<UInt32>) |
uint32x4_t veorq_u32 (uint32x4_t a, uint32x4_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector128<UInt64>, Vector128<UInt64>) |
uint64x2_t veorq_u64 (uint64x2_t a, uint64x2_t b) A32: VEOR Qd, Qn, Qm A64: EOR Vd.16B, Vn.16B, Vm.16B |
Xor(Vector64<Byte>, Vector64<Byte>) |
uint8x8_t veor_u8 (uint8x8_t a, uint8x8_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<Double>, Vector64<Double>) |
float64x1_t veor_f64 (float64x1_t a, float64x1_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Xor(Vector64<Int16>, Vector64<Int16>) |
int16x4_t veor_s16 (int16x4_t a, int16x4_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<Int32>, Vector64<Int32>) |
int32x2_t veor_s32 (int32x2_t a, int32x2_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<Int64>, Vector64<Int64>) |
int64x1_t veor_s64 (int64x1_t a, int64x1_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<SByte>, Vector64<SByte>) |
int8x8_t veor_s8 (int8x8_t a, int8x8_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<Single>, Vector64<Single>) |
float32x2_t veor_f32 (float32x2_t a, float32x2_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. |
Xor(Vector64<UInt16>, Vector64<UInt16>) |
uint16x4_t veor_u16 (uint16x4_t a, uint16x4_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<UInt32>, Vector64<UInt32>) |
uint32x2_t veor_u32 (uint32x2_t a, uint32x2_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
Xor(Vector64<UInt64>, Vector64<UInt64>) |
uint64x1_t veor_u64 (uint64x1_t a, uint64x1_t b) A32: VEOR Dd, Dn, Dm A64: EOR Vd.8B, Vn.8B, Vm.8B |
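Xor operates on the raw register bits regardless of element type, which is why the floating-point overloads above reuse the integer VEOR/EOR encoding. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector128<uint> data = Vector128.Create(0xDEADBEEFu, 0x12345678u, 0u, 0xFFFFFFFFu);
    Vector128<uint> ones = Vector128.Create(0xFFFFFFFFu);

    // XOR with all-ones flips every bit: <0x21524110, 0xEDCBA987, 0xFFFFFFFF, 0>.
    Vector128<uint> flipped = AdvSimd.Xor(data, ones);
}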
ZeroExtendWideningLower(Vector64<Byte>) |
uint16x8_t vmovl_u8 (uint8x8_t a) A32: VMOVL. U8 Qd, Dm A64: UXTL Vd.8H, Vn.8B |
ZeroExtendWideningLower(Vector64<Int16>) |
uint32x4_t vmovl_u16 (uint16x4_t a) A32: VMOVL. U16 Qd, Dm A64: UXTL Vd.4S, Vn.4H |
ZeroExtendWideningLower(Vector64<Int32>) |
uint64x2_t vmovl_u32 (uint32x2_t a) A32: VMOVL. U32 Qd, Dm A64: UXTL Vd.2D, Vn.2S |
ZeroExtendWideningLower(Vector64<SByte>) |
uint16x8_t vmovl_u8 (uint8x8_t a) A32: VMOVL. U8 Qd, Dm A64: UXTL Vd.8H, Vn.8B |
ZeroExtendWideningLower(Vector64<UInt16>) |
uint32x4_t vmovl_u16 (uint16x4_t a) A32: VMOVL. U16 Qd, Dm A64: UXTL Vd.4S, Vn.4H |
ZeroExtendWideningLower(Vector64<UInt32>) |
uint64x2_t vmovl_u32 (uint32x2_t a) A32: VMOVL. U32 Qd, Dm A64: UXTL Vd.2D, Vn.2S |
ZeroExtendWideningUpper(Vector128<Byte>) |
uint16x8_t vmovl_high_u8 (uint8x16_t a) A32: VMOVL. U8 Qd, Dm+1 A64: UXTL2 Vd.8H, Vn.16B |
ZeroExtendWideningUpper(Vector128<Int16>) |
uint32x4_t vmovl_high_u16 (uint16x8_t a) A32: VMOVL. U16 Qd, Dm+1 A64: UXTL2 Vd.4S, Vn.8H |
ZeroExtendWideningUpper(Vector128<Int32>) |
uint64x2_t vmovl_high_u32 (uint32x4_t a) A32: VMOVL. U32 Qd, Dm+1 A64: UXTL2 Vd.2D, Vn.4S |
ZeroExtendWideningUpper(Vector128<SByte>) |
uint16x8_t vmovl_high_u8 (uint8x16_t a) A32: VMOVL. U8 Qd, Dm+1 A64: UXTL2 Vd.8H, Vn.16B |
ZeroExtendWideningUpper(Vector128<UInt16>) |
uint32x4_t vmovl_high_u16 (uint16x8_t a) A32: VMOVL. U16 Qd, Dm+1 A64: UXTL2 Vd.4S, Vn.8H |
ZeroExtendWideningUpper(Vector128<UInt32>) |
uint64x2_t vmovl_high_u32 (uint32x4_t a) A32: VMOVL. U32 Qd, Dm+1 A64: UXTL2 Vd.2D, Vn.4S |
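Zero extension widens each lane to double width with zero-filled high bits; the Lower overloads widen a 64-bit source, the Upper overloads the high half of a 128-bit source. A minimal C# sketch with illustrative values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

if (AdvSimd.IsSupported)
{
    Vector64<byte> narrow = Vector64.Create((byte)1, 2, 3, 4, 250, 251, 252, 253);

    // Each byte becomes a ushort with a zeroed high byte: <1, 2, 3, 4, 250, 251, 252, 253>.
    Vector128<ushort> lower = AdvSimd.ZeroExtendWideningLower(narrow);

    // The Upper form widens the high eight bytes of a 128-bit source instead.
    Vector128<byte> full = Vector128.Create((byte)9);
    Vector128<ushort> upper = AdvSimd.ZeroExtendWideningUpper(full);
}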